repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
3DG-STFM | 3DG-STFM-master/configs/data/__init__.py | 0 | 0 | 0 | py |
|
3DG-STFM | 3DG-STFM-master/configs/data/megadepth_test_1500.py | from configs.data.base import cfg
'''
TEST_BASE_PATH = "inference/megadepth_test_1500_scene_info"
cfg.DATASET.TEST_DATA_SOURCE = "MegaDepth"
cfg.DATASET.TEST_DATA_ROOT = "data/megadepth/test"
cfg.DATASET.TEST_NPZ_ROOT = f"{TEST_BASE_PATH}"
cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/megadepth_test_1500.txt"
cfg.DATASET.MGDPT_IMG_RESIZE = 840
cfg.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0
'''
TEST_BASE_PATH = "data/megadepth/index"
cfg.DATASET.TEST_DATA_SOURCE = "MegaDepth"
cfg.DATASET.TEST_DATA_ROOT = "data/megadepth/test"
cfg.DATASET.TEST_NPZ_ROOT = f"{TEST_BASE_PATH}/scene_info_val_1500"
cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/trainvaltest_list/val_list.txt"
cfg.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0 # for both test and val
| 830 | 42.736842 | 108 | py |
3DG-STFM | 3DG-STFM-master/configs/data/scannet_trainval.py | from configs.data.base import cfg
TRAIN_BASE_PATH = "data/scannet/index"
cfg.DATASET.TRAINVAL_DATA_SOURCE = "ScanNet"
cfg.DATASET.TRAIN_DATA_ROOT = "data/scannet/train"
cfg.DATASET.TRAIN_NPZ_ROOT = f"{TRAIN_BASE_PATH}/scene_data/train"
cfg.DATASET.TRAIN_LIST_PATH = f"{TRAIN_BASE_PATH}/scene_data/train_list/scannet_all.txt"
cfg.DATASET.TRAIN_INTRINSIC_PATH = f"{TRAIN_BASE_PATH}/intrinsics.npz"
TEST_BASE_PATH = "inference/scannet_test_1500"
cfg.DATASET.TEST_DATA_SOURCE = "ScanNet"
cfg.DATASET.VAL_DATA_ROOT = cfg.DATASET.TEST_DATA_ROOT = "data/scannet/test"
cfg.DATASET.VAL_NPZ_ROOT = cfg.DATASET.TEST_NPZ_ROOT = TEST_BASE_PATH
cfg.DATASET.VAL_LIST_PATH = cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/scannet_test.txt"
cfg.DATASET.VAL_INTRINSIC_PATH = cfg.DATASET.TEST_INTRINSIC_PATH = f"{TEST_BASE_PATH}/intrinsics.npz"
cfg.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0 # for both test and val
| 897 | 48.888889 | 101 | py |
tencent-ml-images | tencent-ml-images-master/finetune.py | #!/usr/bin/python
"""
Tencent is pleased to support the open source community by making Tencent ML-Images available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
from __future__ import print_function
import sys
import os
import time
from datetime import datetime
import tensorflow as tf
from models import resnet as resnet
from data_processing import dataset as file_db
from data_processing import image_preprocessing as image_preprocess
from flags import FLAGS
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
#os.environ["CUDA_VISIBLE_DEVICES"]="0,1,2,3"
def assign_weights_from_cp(cpk_path, sess, scope):
'''
restore from ckpt
'''
reader = tf.train.NewCheckpointReader(cpk_path)
temp = reader.debug_string().decode('utf8')
lines = temp.split("\n")
i = 0
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
with tf.variable_scope(scope, reuse=True):
try:
if key.find(r'global_step')!=-1 or key.find(r'Momentum')!=-1 or key.find(r'logits')!=-1:
print("do not need restore from ckpt key:%s" % key)
continue
var = tf.get_variable(key)
sess.run(var.assign(reader.get_tensor(key)))
print("restore from ckpt key:%s" % key)
except ValueError:
print("can not restore from ckpt key:%s" % key)
def record_parser_fn(value, is_training):
"""Parse an image record from `value`."""
keys_to_features = {
'width': tf.FixedLenFeature([], dtype=tf.int64, default_value=0),
'height': tf.FixedLenFeature([], dtype=tf.int64, default_value=0),
'image': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
'label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
'name': tf.FixedLenFeature([], dtype=tf.string, default_value='')
}
parsed = tf.parse_single_example(value, keys_to_features)
image = tf.image.decode_image(tf.reshape(parsed['image'], shape=[]),
FLAGS.image_channels)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
bbox = tf.concat(axis=0, values=[ [[]], [[]], [[]], [[]] ])
bbox = tf.transpose(tf.expand_dims(bbox, 0), [0, 2, 1])
image = image_preprocess.preprocess_image(
image=image,
output_height=FLAGS.image_size,
output_width=FLAGS.image_size,
object_cover=0.0,
area_cover=0.05,
is_training=is_training,
bbox=bbox)
label = tf.cast(tf.reshape(parsed['label'], shape=[]),dtype=tf.int32)
label = tf.one_hot(label, FLAGS.class_num)
return image, label
def tower_model(images, labels):
model = resnet.ResNet(images, is_training=(FLAGS.mode == tf.estimator.ModeKeys.TRAIN))
model.build_model()
# Warning: Calculate loss, which includes softmax cross entropy and L2 regularization.
cross_entropy = tf.losses.softmax_cross_entropy(
logits=model.logit, onehot_labels=labels)
tf.identity(cross_entropy, name='cross_entropy')
tf.summary.scalar('cross_entropy', cross_entropy)
# Add weight decay to the loss. We add the batch norm variables into the L2 regularization because,
# in large-scale data training, this improves the generalization power of the model.
loss = cross_entropy + FLAGS.weight_decay * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if
'bn' not in v.name]) + 0.1 * FLAGS.weight_decay * tf.add_n(
[tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bn' in v.name])
return model, loss
def average_gradients(tower_grads):
""" Calculate the average gradient of shared variables across all towers. """
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for grad, var in grad_and_vars:
grads.append(tf.expand_dims(grad, 0))
# Average over the 'tower' dimension.
gradient = tf.reduce_mean(tf.concat(axis=0, values=grads), 0)
v = grad_and_vars[0][1]
grad_and_var = (gradient, v)
average_grads.append(grad_and_var)
return average_grads
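# Illustrative note (added for clarity, not part of the original script): tower_grads
# is a list with one entry per GPU tower, each entry a list of (gradient, variable)
# pairs in the same variable order, e.g.
#   tower_grads = [[(g0_w, w), (g0_b, b)],   # tower 0
#                  [(g1_w, w), (g1_b, b)]]   # tower 1
# zip(*tower_grads) therefore yields per-variable tuples, and the mean above is taken
# over the tower dimension before the averaged gradient is re-paired with its variable.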
def train(train_dataset, is_training=True):
with tf.Graph().as_default(), tf.device('/cpu:0'):
# set global_step and learning_rate
global_step = tf.train.get_or_create_global_step()
lr = tf.train.exponential_decay(
FLAGS.lr,
global_step,
FLAGS.lr_decay_step,
FLAGS.lr_decay_factor,
staircase=True)
# optimizer, default is momentum
if FLAGS.optimizer == "sgd":
optimizer = tf.train.GradientDescentOptimizer(lr)
elif FLAGS.optimizer == "mom":
optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=FLAGS.opt_momentum)
else:
raise ValueError("Do not support optimizer '%s'" % FLAGS.optimizer)
# Get images and labels for training and split the batch across GPUs.
"""Input function which provides batches for train or eval."""
worker_num = 1
num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus
batch_size = FLAGS.batch_size * FLAGS.num_gpus
print('batch_size={}'.format(batch_size))
dataset = tf.data.Dataset.from_tensor_slices(train_dataset.data_files())
dataset = dataset.shuffle(buffer_size=FLAGS.file_shuffle_buffer, seed=worker_num)
dataset = dataset.flat_map(tf.data.TFRecordDataset)
dataset = dataset.map(lambda value: record_parser_fn(value, is_training),
num_parallel_calls=num_preprocess_threads)
dataset = dataset.prefetch(batch_size)
if is_training:
# When choosing shuffle buffer sizes, larger sizes result in better
# randomness, while smaller sizes have better performance.
# dataset = dataset.shuffle(buffer_size=_SHUFFLE_BUFFER, seed=worker_id)
dataset = dataset.shuffle(buffer_size=FLAGS.shuffle_buffer)
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
images, labels = iterator.get_next()
images_splits = tf.split(images, FLAGS.num_gpus, 0)
labels_splits = tf.split(labels, FLAGS.num_gpus, 0)
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
# Calculate the gradients for each model tower
Loss = None
tower_grads = []
#building graphs
with tf.variable_scope(tf.get_variable_scope()):
print("Building graph ...", file=sys.stderr)
for i in xrange(FLAGS.num_gpus):
with tf.device("/gpu:%d" % i):
with tf.name_scope("%s_%d" % ("tower", i)) as scope:
# Build graph
model, Loss = tower_model(images_splits[i], labels_splits[i])
# Reuse variables for the next tower
tf.get_variable_scope().reuse_variables()
# Get finetune variables
finetune_vars = []
if FLAGS.FixBlock2:
finetune_vars = [v for v in tf.trainable_variables()
if v.name.find(r"stages_2") != -1 or
v.name.find(r"stages_3") != -1 or
v.name.find(r"global_pool") != -1 or
v.name.find(r"logits") != -1]
else:
finetune_vars = tf.trainable_variables()
# Only the summaries from the final tower are retained
summary = tf.get_collection(tf.GraphKeys.SUMMARIES, scope=scope)
grads = optimizer.compute_gradients(Loss, var_list=finetune_vars)
tower_grads.append(grads)
print("Build Graph (%s/%s)" % (i+1, FLAGS.num_gpus), file=sys.stderr)
summaries.append(summary)
summaries.append(tf.summary.scalar('learning_rate', lr))
# Build train op,
grads = average_gradients(tower_grads)
apply_gradient_op = optimizer.apply_gradients(grads, global_step=global_step)
##############
batchnorm_updates_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = tf.group(apply_gradient_op, batchnorm_updates_op)
# Build Session: the session config may need to be set carefully
sess = tf.Session(config=tf.ConfigProto( allow_soft_placement=True, log_device_placement=False))
saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.max_to_keep)
summary_op = tf.summary.merge(summaries)
summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
# Initialize Model
if FLAGS.restore:
print("Restoring checkpoint from %s" % FLAGS.pretrain_ckpt, file=sys.stderr)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
#restore from existing ckpts
assign_weights_from_cp(FLAGS.pretrain_ckpt, sess, tf.get_variable_scope())
else:
print("Run global_variables_initializer ..", file=sys.stderr)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sys.stdout.write("---------------Training Begin---------------\n")
batch_duration = 0.0
# Initial queue runner
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# Start train iter
step = sess.run(global_step)
i=0
while i <= FLAGS.max_iter:
# profile log
if i > 0 and i % FLAGS.prof_interval == 0:
print("%s: step %d, iteration %d, %.2f sec/batch" %
(datetime.now(), step, i, batch_duration))
# log
if i > 0 and i % FLAGS.log_interval == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, i)
# checkpoint
if i > 0 and i % FLAGS.snapshot == 0:
if not os.path.exists(FLAGS.model_dir):
os.mkdir(FLAGS.model_dir)
ckpt_path = os.path.join(FLAGS.model_dir, "resnet.ckpt")
saver.save(sess, ckpt_path, global_step=global_step)
# train
batch_start = time.time()
_, step, loss = sess.run([train_op, global_step, Loss])
batch_duration = time.time() - batch_start
i = i + 1
print("%s: step %d, iteration %d, train loss %.2f " % (datetime.now(), step, i, loss))
coord.request_stop()
def main(_):
train_dataset = file_db.Dataset(os.path.join(FLAGS.data_dir, 'train'))
train(train_dataset, is_training=(FLAGS.mode == tf.estimator.ModeKeys.TRAIN))
if __name__ == "__main__":
tf.app.run()
| 11,795 | 42.688889 | 305 | py |
tencent-ml-images | tencent-ml-images-master/flags.py | #!/usr/bin/python
"""
Tencent is pleased to support the open source community by making Tencent ML-Images available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
"""Global Options
"""
tf.app.flags.DEFINE_string('mode', 'train',
"run code in train or validation mode")
tf.app.flags.DEFINE_integer('max_to_keep', 200,
"maximum number of checkpoints to keep")
"""Data Options
"""
tf.app.flags.DEFINE_string('data_dir', './data/train/',
"Path to the data TFRecord of Example protos. Should save in train and val")
tf.app.flags.DEFINE_integer('batch_size', 512,
"Number of images to process in a batch.")
tf.app.flags.DEFINE_integer('num_preprocess_threads', 4,
"Number of preprocessing threads per tower. Please make this a multiple of 4")
tf.app.flags.DEFINE_integer('file_shuffle_buffer', 1500,
"buffer size for file names")
tf.app.flags.DEFINE_integer('shuffle_buffer', 2048,
"buffer size for samples")
tf.app.flags.DEFINE_boolean('with_bbox', True,
"whether use bbox in train set")
"""Model Options
"""
tf.app.flags.DEFINE_integer('class_num', 1000,
"distinct class number")
tf.app.flags.DEFINE_integer('resnet_size', 101,
"resnet block layer number [ 18, 34, 50, 101, 152, 200 ]")
tf.app.flags.DEFINE_string('data_format', 'channels_first',
"data format for the input and output data [ channels_first | channels_last ]")
tf.app.flags.DEFINE_integer('image_size', 224,
"default image size for model input layer")
tf.app.flags.DEFINE_integer('image_channels', 3,
"default image channels for model input layer")
tf.app.flags.DEFINE_float('batch_norm_decay', 0.997,
"used for the batch norm moving average")
tf.app.flags.DEFINE_float('batch_norm_epsilon', 1e-5,
"used in the batch norm layer to avoid division by zero")
tf.app.flags.DEFINE_float('mask_thres', 0.7,
"mask threshold for balancing positive and negative samples")
tf.app.flags.DEFINE_float('neg_select', 0.3,
"fraction of classes with only negative samples in a batch that are selected to learn")
"""Train Options
"""
tf.app.flags.DEFINE_boolean('restore', False,
"whether to restore weights from pretrained checkpoint.")
tf.app.flags.DEFINE_integer('num_gpus', 1,
"How many GPUs to use.")
tf.app.flags.DEFINE_string('optimizer','mom',
"optimization algorithm")
tf.app.flags.DEFINE_float('opt_momentum', 0.9,
"momentum used during learning")
tf.app.flags.DEFINE_float('lr', 0.1,
"Initial learning rate.")
tf.app.flags.DEFINE_integer('lr_decay_step', 0,
"Iterations after which learning rate decays.")
tf.app.flags.DEFINE_float('lr_decay_factor', 0.1,
"Learning rate decay factor.")
tf.app.flags.DEFINE_float('weight_decay', 0.0001,
"Tainable Weight l2 loss factor.")
tf.app.flags.DEFINE_integer('warmup', 0,
"Step at which warmup stops; needed when using distributed learning")
tf.app.flags.DEFINE_float('lr_warmup', 0.1,
"Initial warmup learning rate; needed when using distributed learning")
tf.app.flags.DEFINE_integer('lr_warmup_decay_step', 0,
"Iterations after which the warmup learning rate decays; needed when using distributed learning")
tf.app.flags.DEFINE_float('lr_warmup_decay_factor', 1.414,
"Warmup learning rate decay factor; needed when using distributed learning")
tf.app.flags.DEFINE_integer('max_iter', 1000000,
"max iteration number for stopping; -1 means run forever")
tf.app.flags.DEFINE_integer('test_interval', 0,
"iteration interval for evaluating the model")
tf.app.flags.DEFINE_integer('test_iter', 0,
"iterations for evaluating the model")
tf.app.flags.DEFINE_integer('prof_interval', 10,
"iteration interval for printing training time cost")
tf.app.flags.DEFINE_integer('log_interval', 0,
"iteration interval for printing the summary log")
tf.app.flags.DEFINE_string('log_dir', './out/log/',
"Directory where to write event logs")
tf.app.flags.DEFINE_string('model_dir', './out/checkpoint/',
"path for saving learned tf model")
tf.app.flags.DEFINE_string('tmp_model_dir', './out/tmp/checkpoint/',
"The directory where the temporary model will be stored")
tf.app.flags.DEFINE_integer('snapshot', 0,
"Iteration for saving model snapshot")
tf.app.flags.DEFINE_integer('epoch_iter', 0,
"Iteration for epoch ")
tf.app.flags.DEFINE_float('drop_rate', 0.5,
"DropOut rate")
tf.app.flags.DEFINE_integer('random_seed', 1234,
"Random sedd for neigitive class selected")
tf.app.flags.DEFINE_string('pretrain_ckpt', '',
'pretrain checkpoint file')
tf.app.flags.DEFINE_boolean('FixBlock2', False,
'whether to fix the first two blocks, used for finetuning')
"""eval options
"""
tf.app.flags.DEFINE_integer('visiable_gpu', 0,
"wihch gpu can use")
tf.app.flags.DEFINE_string('piclist', '',
"eval picture list")
tf.app.flags.DEFINE_integer('interval', 32,
"eval chekpoint interval")
tf.app.flags.DEFINE_integer('start', 0,
"the start index of ckpts")
| 5,301 | 42.105691 | 305 | py |
tencent-ml-images | tencent-ml-images-master/extract_feature.py | #!/usr/bin/python
"""
Tencent is pleased to support the open source community by making Tencent ML-Images available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
"""Use pre-trained model extract image feature
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import cv2 as cv
import tensorflow as tf
from models import resnet as resnet
from flags import FLAGS
tf.app.flags.DEFINE_string("result", "",
"file name to save features")
tf.app.flags.DEFINE_string("images", "",
"contains image path per line per image")
"""Crop Image To 224*224
Args:
img: a 3-D numpy array (H,W,C)
type: crop method, supports [ center | 10crop ]
"""
def preprocess(img, type="center"):
# resize image with smallest side to be 256
rawH = float(img.shape[0])
rawW = float(img.shape[1])
newH = 256.0
newW = 256.0
if rawH <= rawW:
newW = (rawW/rawH) * newH
else:
newH = (rawH/rawW) * newW
img = cv.resize(img, (int(newW), int(newH)))
imgs = None
if type=='center':
imgs = np.zeros((1, 224, 224, 3))
imgs[0,...] = img[int((newH-224)/2):int((newH-224)/2)+224,
int((newW-224)/2):int((newW-224)/2)+224]
elif type=='10crop':
imgs = np.zeros((10, 224, 224, 3))
offset = [(0, 0),
(0, int(newW-224)),
(int(newH-224), 0),
(int(newH-224), int(newW-224)),
(int((newH-224)/2), int((newW-224)/2))]
for i in range(0, 5):
imgs[i,...] = img[offset[i][0]:offset[i][0]+224,
offset[i][1]:offset[i][1]+224]
img = cv.flip(img, 1)
for i in range(0, 5):
imgs[i+5,...] = img[offset[i][0]:offset[i][0]+224,
offset[i][1]:offset[i][1]+224]
else:
raise ValueError("Type not support")
imgs = ((imgs/255.0) - 0.5) * 2.0
imgs = imgs[...,::-1]
return imgs
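# Usage sketch (illustrative, not part of the original script; the image path is hypothetical):
#   raw = cv.imread("example.jpg")
#   batch = preprocess(raw, type="10crop")   # -> numpy array of shape (10, 224, 224, 3)
# The returned crops are rescaled to [-1, 1] and the channel order is reversed
# (BGR from OpenCV to RGB), matching the final two lines above.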
# build model
images = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 3])
net = resnet.ResNet(images, is_training=False)
net.build_model()
logits = net.logit
feat = net.feat
# restore model
saver = tf.train.Saver(tf.global_variables())
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(FLAGS.visiable_gpu)
config.log_device_placement=False
sess = tf.Session(config=config)
# load trained model
saver.restore(sess, FLAGS.pretrain_ckpt)
# inference on net
types='center'
ffeat = open(FLAGS.result, 'w')
with open(FLAGS.images, 'r') as lines:
for line in lines:
sp = line.rstrip('\n').split(' ')
raw_img = cv.imread(sp[0])
if raw_img is None or raw_img.data is None:
print("open pic " + sp[0] + " failed")
continue
imgs = preprocess(raw_img, types)
feats = sess.run(feat, {images:imgs})
feats = np.squeeze(feats[0])
if types=='10crop':
feats = np.mean(feats, axis=0)
print('feature-length:{}, feature={}'.format(len(feats), feats))
ffeat.write(sp[0] + "\t" + sp[1] + "\t" + " ".join([str(x) for x in list(feats)]) + '\n')
ffeat.close()
| 3,623 | 33.188679 | 305 | py |
tencent-ml-images | tencent-ml-images-master/image_classification.py | #!/usr/bin/python
"""
Tencent is pleased to support the open source community by making Tencent ML-Images available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
"""Use the saved checkpoint to run single-label image classification"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import cv2 as cv
import tensorflow as tf
from models import resnet as resnet
from flags import FLAGS
tf.app.flags.DEFINE_string("result", "label_pred.txt",
"file name to save predictions")
tf.app.flags.DEFINE_string("images", "",
"contains image path per line per image")
tf.app.flags.DEFINE_integer("top_k_pred", 5,
"the top-k predictions")
tf.app.flags.DEFINE_string("dictionary", "",
"the class dictionary of imagenet-2012")
def _load_dictionary(dict_file):
dictionary = dict()
with open(dict_file, 'r') as lines:
for line in lines:
sp = line.rstrip('\n').split('\t')
idx, name = sp[0], sp[1]
dictionary[idx] = name
return dictionary
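# Assumed dictionary file format (inferred from the parsing above, not documented here):
# one class per line, tab separated as "<index><TAB><class name>", e.g.
#   0<TAB>kit fox
#   1<TAB>English setter
# where the class names here are only illustrative placeholders.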
def preprocess(img):
rawH = float(img.shape[0])
rawW = float(img.shape[1])
newH = 256.0
newW = 256.0
test_crop = 224.0
if rawH <= rawW:
newW = (rawW/rawH) * newH
else:
newH = (rawH/rawW) * newW
img = cv.resize(img, (int(newW), int(newH)))
img = img[int((newH-test_crop)/2):int((newH-test_crop)/2)+int(test_crop),int((newW-test_crop)/2):int((newW-test_crop)/2)+int(test_crop)]
img = ((img/255.0) - 0.5) * 2.0
img = img[...,::-1]
return img
# build model
images = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 3])
net = resnet.ResNet(images, is_training=False)
net.build_model()
logit = net.logit
prob = tf.nn.softmax(logit)
prob_topk, pred_topk = tf.nn.top_k(prob, k=FLAGS.top_k_pred)
# restore model
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(FLAGS.visiable_gpu)
config.log_device_placement=False
sess = tf.Session(config=config)
saver = tf.train.Saver(tf.global_variables())
saver.restore(sess, FLAGS.model_dir)
dictionary = _load_dictionary(FLAGS.dictionary)
# inference
types= 'center'#'10crop'
orig_stdout = sys.stdout
f = open(FLAGS.result, 'w')
sys.stdout = f
with open(FLAGS.images, 'r') as lines:
for line in lines:
sp = line.rstrip('\n').split('\t')
raw_img = cv.imread(sp[0])
if raw_img is None or raw_img.data is None:
print("open pic " + sp[0] + " failed")
continue
#imgs = preprocess(raw_img, types)
img = preprocess(raw_img)
logits, probs_topk, preds_topk = sess.run([logit, prob_topk, pred_topk],
{images:np.expand_dims(img, axis=0)})
probs_topk = np.squeeze(probs_topk)
preds_topk = np.squeeze(preds_topk)
names_topk = [dictionary[str(i)] for i in preds_topk]
print('+++ the predictions of {} is:'.format(sp[0]))
for i, pred in enumerate(preds_topk):
print('%d %s: %.3f' % (pred, names_topk[i], probs_topk[i]))
sys.stdout = orig_stdout
f.close()
| 3,620 | 34.5 | 305 | py |
tencent-ml-images | tencent-ml-images-master/train.py | """
Tencent is pleased to support the open source community by making Tencent ML-Images available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
"""Runs a ResNet model on the ImageNet dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import math
import numpy as np
import tensorflow as tf
from data_processing import dataset as file_db
from data_processing import image_preprocessing as image_preprocess
from models import resnet as resnet
from flags import FLAGS
def record_parser_fn(value, is_training):
"""Parse an image record from `value`."""
keys_to_features = {
'width': tf.FixedLenFeature([], dtype=tf.int64, default_value=0),
'height': tf.FixedLenFeature([], dtype=tf.int64, default_value=0),
'image': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
'label': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
'name': tf.FixedLenFeature([], dtype=tf.string, default_value='')
}
parsed = tf.parse_single_example(value, keys_to_features)
image = tf.image.decode_image(tf.reshape(parsed['image'], shape=[]),
FLAGS.image_channels)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
bbox = tf.concat(axis=0, values=[ [[]], [[]], [[]], [[]] ])
bbox = tf.transpose(tf.expand_dims(bbox, 0), [0, 2, 1])
image = image_preprocess.preprocess_image(
image=image,
output_height=FLAGS.image_size,
output_width=FLAGS.image_size,
object_cover=0.7,
area_cover=0.7,
is_training=is_training,
bbox=bbox)
label = tf.reshape(tf.decode_raw(parsed['label'], tf.float32), shape=[FLAGS.class_num,])
return image, label
def input_fn(is_training, data_dir, batch_size, num_epochs=1):
"""Input function which provides batches for train or eval."""
dataset = None
if is_training:
dataset = file_db.Dataset(os.path.join(data_dir, 'train'))
else:
dataset = file_db.Dataset(os.path.join(data_dir, 'val'))
worker_id = 0
worker_num = 1
dataset = tf.data.Dataset.from_tensor_slices(dataset.data_files())
# divide the dataset
if is_training:
dataset = dataset.shuffle(buffer_size=FLAGS.file_shuffle_buffer, seed=worker_num)
dataset = dataset.shard(worker_num, worker_id)
dataset = dataset.flat_map(tf.data.TFRecordDataset)
dataset = dataset.map(lambda value: record_parser_fn(value, is_training),
num_parallel_calls=5)
dataset = dataset.prefetch(batch_size)
if is_training:
# When choosing shuffle buffer sizes, larger sizes result in better
# randomness, while smaller sizes have better performance.
# dataset = dataset.shuffle(buffer_size=_SHUFFLE_BUFFER, seed=worker_id)
dataset = dataset.shuffle(buffer_size=FLAGS.shuffle_buffer)
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
images, labels = iterator.get_next()
return images, labels
def resnet_model_fn(features, labels, mode, params):
"""Our model_fn for ResNet to be used with our Estimator."""
tf.summary.image('images', features, max_outputs=6)
# build model
net = resnet.ResNet(features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
logits = net.build_model()
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate loss, which includes softmax cross entropy and L2 regularization.
# a. get loss coefficient
pos_mask = tf.reduce_sum(
tf.cast(
tf.greater_equal(
labels, tf.fill(tf.shape(labels), FLAGS.mask_thres)),
tf.float32),
0)
pos_curr_count = tf.cast(tf.greater( pos_mask, 0), tf.float32)
neg_curr_count = tf.cast(tf.less_equal(pos_mask, 0), tf.float32)
pos_count = tf.Variable(tf.zeros(shape=[FLAGS.class_num,]), trainable=False)
neg_count = tf.Variable(tf.zeros(shape=[FLAGS.class_num,]), trainable=False)
neg_select = tf.cast(
tf.less_equal(
tf.random_uniform(
shape=[FLAGS.class_num,],
minval=0, maxval=1,
seed = FLAGS.random_seed),
FLAGS.neg_select),
tf.float32)
tf.summary.histogram('pos_curr_count', pos_curr_count)
tf.summary.histogram('neg_curr_count', neg_curr_count)
tf.summary.histogram('neg_select', neg_select)
with tf.control_dependencies([pos_curr_count, neg_curr_count, neg_select]):
pos_count = tf.assign_sub(
tf.assign_add(pos_count, pos_curr_count),
tf.multiply(pos_count, neg_curr_count))
neg_count = tf.assign_sub(
tf.assign_add(neg_count, tf.multiply(neg_curr_count, neg_select)),
tf.multiply(neg_count, pos_curr_count))
tf.summary.histogram('pos_count', pos_count)
tf.summary.histogram('neg_count', neg_count)
pos_loss_coef = -1 * (tf.log((0.01 + pos_count)/10)/tf.log(10.0))
pos_loss_coef = tf.where(
tf.greater(pos_loss_coef, tf.fill(tf.shape(pos_loss_coef), 0.01)),
pos_loss_coef,
tf.fill(tf.shape(pos_loss_coef), 0.01))
pos_loss_coef = tf.multiply(pos_loss_coef, pos_curr_count)
tf.summary.histogram('pos_loss_coef', pos_loss_coef)
neg_loss_coef = -1 * (tf.log((8 + neg_count)/10)/tf.log(10.0))
neg_loss_coef = tf.where(
tf.greater(neg_loss_coef, tf.fill(tf.shape(neg_loss_coef), 0.01)),
neg_loss_coef,
tf.fill(tf.shape(neg_loss_coef), 0.001))
neg_loss_coef = tf.multiply(neg_loss_coef, tf.multiply(neg_curr_count, neg_select))
tf.summary.histogram('neg_loss_coef', neg_loss_coef)
loss_coef = tf.add(pos_loss_coef, neg_loss_coef)
tf.summary.histogram('loss_coef', loss_coef)
# b. get non-negative mask
non_neg_mask = tf.fill(tf.shape(labels), -1.0, name='non_neg')
non_neg_mask = tf.cast(tf.not_equal(labels, non_neg_mask), tf.float32)
tf.summary.histogram('non_neg', non_neg_mask)
# cal loss
cross_entropy = tf.nn.weighted_cross_entropy_with_logits(
logits=logits, targets=labels, pos_weight=12, name='sigmod_cross_entropy')
tf.summary.histogram('sigmod_ce', cross_entropy)
cross_entropy_cost = tf.reduce_sum(tf.reduce_mean(cross_entropy * non_neg_mask, axis=0) * loss_coef)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy_cost, name='cross_entropy')
tf.summary.scalar('cross_entropy', cross_entropy_cost)
# Add weight decay to the loss. We exclude the batch norm variables because
# doing so leads to a small improvement in accuracy.
loss = cross_entropy_cost + FLAGS.weight_decay * tf.add_n(
[tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'batch_normalization' not in v.name])
if mode == tf.estimator.ModeKeys.TRAIN:
# Scale the learning rate linearly with the batch size. When the batch size
# is 256, the learning rate should be 0.1.
lr_warmup = FLAGS.lr_warmup
warmup_step = FLAGS.warmup
warmup_decay_step = FLAGS.lr_warmup_decay_step
warmup_decay_factor = FLAGS.lr_warmup_decay_factor
global_step = tf.train.get_or_create_global_step()
boundaries = [
int(FLAGS.lr_decay_step * epoch) for epoch in [1, 2, 3, 4]]
values = [
FLAGS.lr * decay for decay in [1, 0.1, 0.01, 1e-3, 1e-4]]
learning_rate = tf.train.piecewise_constant(
tf.cast(global_step, tf.int32), boundaries, values)
# Linear Scaling Rule and Gradual Warmup
lr = tf.cond(
global_step < warmup_step,
lambda: tf.train.exponential_decay(
lr_warmup,
global_step,
warmup_decay_step,
warmup_decay_factor,
staircase=True
),
lambda: learning_rate
)
# Create a tensor named learning_rate for logging purposes.
tf.identity(lr, name='learning_rate')
tf.summary.scalar('learning_rate', lr)
optimizer = tf.train.MomentumOptimizer(
learning_rate=lr,
momentum=FLAGS.opt_momentum)
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step)
else:
train_op = None
# Build evaluate metrics
accuracy = tf.metrics.accuracy(
tf.argmax(labels, axis=1), predictions['classes'])
metrics = {'accuracy': accuracy}
tf.identity(accuracy[1], name='train_accuracy')
tf.summary.scalar('train_accuracy', accuracy[1])
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics)
def main(_):
# Using the Winograd non-fused algorithms provides a small performance boost.
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(FLAGS.visiable_gpu)
model_path = FLAGS.model_dir
max_ckp_num = (FLAGS.max_to_keep)
run_config = tf.estimator.RunConfig(save_checkpoints_steps=FLAGS.snapshot,
keep_checkpoint_max=max_ckp_num,
session_config=config,
save_summary_steps=FLAGS.log_interval)
resnet_classifier = tf.estimator.Estimator(
model_fn=resnet_model_fn,
model_dir=model_path,
config=run_config,
params={
'resnet_size': FLAGS.resnet_size,
'data_format': FLAGS.data_format,
'batch_size': FLAGS.batch_size,
}
)
tensors_to_log = {
'learning_rate': 'learning_rate',
'cross_entropy': 'cross_entropy',
'train_accuracy': 'train_accuracy'
}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=FLAGS.log_interval, at_end=True)
print('Total run steps = {}'.format(FLAGS.max_iter))
hook_list = [logging_hook]
resnet_classifier.train(
input_fn=lambda: input_fn(True, FLAGS.data_dir, FLAGS.batch_size),
steps=FLAGS.max_iter,
hooks=hook_list
)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| 11,270 | 39.253571 | 305 | py |
tencent-ml-images | tencent-ml-images-master/models/resnet.py | """ResNet model
Related papers:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.insert(0, '../')
from flags import FLAGS
import tensorflow as tf
class ResNet(object):
def __init__(self, images, is_training):
"""Net constructor
Args:
images: 4-D Tensor of images with Shape [batch_size, image_size, image_size, 3]
is_training: bool, used in batch normalization
Return:
A wrapper for building the model
"""
self.is_training = is_training
self.filters = [256, 512, 1024, 2048] # feature map size for each stage
self.strides = [2, 2, 2, 2] # conv strides for each stage's first block
if FLAGS.resnet_size == 50: # resnet size parameters
self.stages = [3, 4, 6, 3]
elif FLAGS.resnet_size == 101:
self.stages = [3, 4, 23, 3]
elif FLAGS.resnet_size == 152:
self.stages = [3, 8, 36, 3]
else:
raise ValueError('resnet_size %d Not implement:' % FLAGS.resnet_size)
self.data_format = FLAGS.data_format
self.num_classes = FLAGS.class_num
self.images = images
if self.data_format == "NCHW":
self.images = tf.transpose(images, [0, 3, 1, 2])
def build_model(self):
# Initial net
with tf.variable_scope('init'):
x = self.images
x = self._pre_padding_conv('init_conv', x, 7, 64, 2)
# 4 stages
for i in range(0, len(self.stages)):
with tf.variable_scope('stages_%d_block_%d' % (i,0)):
x = self._bottleneck_residual(
x,
self.filters[i],
self.strides[i],
'conv',
self.is_training)
for j in range(1, self.stages[i]):
with tf.variable_scope('stages_%d_block_%d' % (i,j)):
x = self._bottleneck_residual(
x,
self.filters[i],
1,
'identity',
self.is_training)
# class wise avg pool
with tf.variable_scope('global_pool'):
x = self._batch_norm('bn', x, self.is_training)
x = self._relu(x)
x = self._global_avg_pool(x)
# extract features
self.feat=x
# logits
with tf.variable_scope("logits"):
self.logit = self._fully_connected(x, out_dim=self.num_classes)
return self.logit
def _bottleneck_residual(self, x, out_channel, strides, _type, is_training):
"""Residual Block
Args:
x : A 4-D tensor
out_channels : out feature map size of residual block
strides : conv strides of block
_type: short cut type, 'conv' or 'identity'
is_training : A Boolean for whether the model is in training or inference mode
"""
# short cut
orig_x = x
if _type=='conv':
orig_x = self._batch_norm('conv1_b1_bn', orig_x, is_training)
orig_x = self._relu(orig_x)
orig_x = self._pre_padding_conv('conv1_b1', orig_x, 1, out_channel, strides)
# bottleneck_residual_block
x = self._batch_norm('conv1_b2_bn', x, is_training)
x = self._relu(x)
x = self._pre_padding_conv('conv1_b2', x, 1, out_channel/4, 1)
x = self._batch_norm('conv2_b2_bn', x, is_training)
x = self._relu(x)
x = self._pre_padding_conv('conv2_b2', x, 3, out_channel/4, strides)
x = self._batch_norm('conv3_b2_bn', x, is_training)
x = self._relu(x)
x = self._pre_padding_conv('conv3_b2', x, 1, out_channel, 1)
# sum
return x + orig_x
def _batch_norm(self, name, x, is_training=True):
"""Batch normalization.
Considering the performance, we use batch_normalization in contrib/layers/python/layers/layers.py
instead of tf.nn.batch_normalization and set fused=True
Args:
x: input tensor
is_training: Whether to return the output in training mode or in inference mode, use the argment
in finetune
"""
with tf.variable_scope(name):
return tf.layers.batch_normalization(
inputs=x,
axis=1 if self.data_format == 'NCHW' else 3,
momentum = FLAGS.batch_norm_decay,
epsilon = FLAGS.batch_norm_epsilon,
center=True,
scale=True,
training=is_training,
fused=True
)
def _pre_padding(self, x, kernel_size):
"""Padding Based On Kernel_size"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if self.data_format == 'NCHW':
x = tf.pad(x, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
else:
x = tf.pad(x, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return x
def _pre_padding_conv(self, name, x, kernel_size, out_channels, strides, bias=False):
"""Convolution
As the way of padding in conv depends on the input size and kernel size, which is very different from Caffe,
we do pre-padding here to align the padding operation.
Args:
x : A 4-D tensor
kernel_size : size of kernel, here we just use square conv kernel
out_channels : out feature map size
strides : conv stride
bias : bias may always be false
"""
if strides > 1:
x = self._pre_padding(x, kernel_size)
with tf.variable_scope(name):
return tf.layers.conv2d(
inputs = x,
filters = out_channels,
kernel_size=kernel_size,
strides=strides,
padding=('SAME' if strides == 1 else 'VALID'),
use_bias=bias,
kernel_initializer=tf.variance_scaling_initializer(),
data_format= 'channels_first' if self.data_format == 'NCHW' else 'channels_last')
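# Worked example (added for clarity, not in the original file): with kernel_size=7 and
# strides=2, _pre_padding adds pad_total = 6 pixels, split as pad_beg = 3 and pad_end = 3
# on each spatial side, and the convolution then runs with 'VALID' padding; with
# strides=1 no pre-padding is applied and the layer falls back to 'SAME' padding.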
def _relu(self, x, leakiness=0.0):
"""
ReLU, with optional leakiness support.
Note: if leakiness is zero, we use tf.nn.relu out of concern for performance
Args:
x : A 4-D tensor
leakiness : slope when x < 0
"""
if leakiness==0.0:
return tf.nn.relu(x)
else:
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _global_avg_pool(self, x):
"""
Global Average Pool; out of concern for performance we use tf.reduce_mean
instead of tf.layers.average_pooling2d
Args:
x: 4-D Tensor
"""
assert x.get_shape().ndims == 4
axes = [2, 3] if self.data_format == 'NCHW' else [1, 2]
return tf.reduce_mean(x, axes, keep_dims=True)
def _fully_connected(self, x, out_dim):
"""
As tf.layers.dense needs a 2-D tensor, reshape it first
Args:
x : 4-D Tensor
out_dim : dimensionality of the output space.
"""
assert x.get_shape().ndims == 4
axes = 1 if self.data_format == 'NCHW' else -1
x = tf.reshape(x, shape=[-1, x.get_shape()[axes]])
return tf.layers.dense(x, units = out_dim)
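# Minimal usage sketch (illustrative, not in the original file; mirrors how
# extract_feature.py and image_classification.py build the graph):
#   images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#   net = ResNet(images, is_training=False)
#   logits = net.build_model()   # pooled features are then available as net.feat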
| 7,099 | 33.803922 | 111 | py |
tencent-ml-images | tencent-ml-images-master/models/__init__.py | 0 | 0 | 0 | py |
|
tencent-ml-images | tencent-ml-images-master/data_processing/image_preprocessing.py | """
Tencent is pleased to support the open source community by making Tencent ML-Images available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
"""Read and preprocess image data.
Image processing occurs on a single image at a time. Image are read and
preprocessed in parallel across multiple threads. The resulting images
are concatenated together to form a single batch for training or evaluation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
def rotate_image(image, thread_id=0, scope=None):
"""Rotate image
thread_id comes from {0, 1, 2, 3} uniformly,
and we will apply rotation to 1/4 of the images in the training set
Args:
image: Tensor containing single image.
thread_id: preprocessing thread ID.
scope: Optional scope for name_scope.
Returns:
rotated image
"""
with tf.name_scope(name=scope, default_name='rotate_image'):
angle = tf.random_uniform([], minval=-45*math.pi/180, maxval=45*math.pi/180, dtype=tf.float32, name="angle")
distorted_image = tf.cond(
tf.equal(thread_id, tf.constant(0, dtype=tf.int32)),
lambda: tf.contrib.image.rotate(image, angle),
lambda: image
)
return distorted_image
def distort_color(image, thread_id=0, scope=None):
"""Distort the color of the image.
thread_id comes from {0, 1, 2, 3} uniformly,
and we will apply color distortion when thread_id = 0 or 1;
thus, only 1/2 of the images in the training set will be distorted
Args:
image: Tensor containing single image.
thread_id: preprocessing thread ID.
scope: Optional scope for name_scope.
Returns:
color-distorted image
"""
with tf.name_scope(name=scope, default_name='distort_color'):
def color_ordering_0(image):
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
return image
def color_ordering_1(image):
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
return image
image = tf.cond(
tf.equal(thread_id, tf.constant(0, dtype=tf.int32)),
lambda: color_ordering_0(image),
lambda: image
)
image = tf.cond(
tf.equal(thread_id, tf.constant(1, dtype=tf.int32)),
lambda: color_ordering_1(image),
lambda: image
)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image
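# Note (added for clarity, not in the original file): color_ordering_0 runs only when
# thread_id == 0 and color_ordering_1 only when thread_id == 1; any other thread_id
# leaves the image unchanged, and the final clip keeps pixel values inside [0, 1].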
def distort_image(image, height, width, object_cover, area_cover, bbox, thread_id=0, scope=None):
"""Distort one image for training a network.
Args:
image: Tensor containing single image
height: integer, image height
width: integer, image width
object_cover: float
area_cover: float
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
thread_id: integer indicating the preprocessing thread.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of distorted image used for training.
"""
with tf.name_scope(name=scope, default_name='distort_image'):
# Crop the image to the specified bounding box.
bbox_begin, bbox_size, distort_bbox = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=object_cover,
aspect_ratio_range=[0.75, 1.33],
area_range=[area_cover, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
distorted_image = tf.slice(image, bbox_begin, bbox_size)
# Resize the image to net input shape
distorted_image = tf.image.resize_images(distorted_image, [height, width])
distorted_image.set_shape([height, width, 3])
# Flip image; we just apply a horizontal flip to 1/2 of the images in the training set
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Rotate image
distorted_image = rotate_image(distorted_image, thread_id)
# Distort image color
distorted_image = distort_color(distorted_image, thread_id)
return distorted_image
def eval_image(image, height, width, scope=None):
"""Prepare one image for evaluation.
Args:
image: Tensor containing single image
height: integer
width: integer
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(values=[image, height, width], name=scope, default_name='eval_image'):
# Crop the central region of the image with an area containing 80% of the original image.
image = tf.image.central_crop(image, central_fraction=0.80)
# Resize the image to the original height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, [0])
return image
def image_preprocessing(image, output_height, output_width, object_cover, area_cover, train, bbox):
"""Decode and preprocess one image for evaluation or training.
Args:
image: Tensor containing single image
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
output_height: integer
output_width: integer
train: boolean
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if train:
thread_id = tf.random_uniform([], minval=0, maxval=3, dtype=tf.int32, name="thread_id")
image = distort_image(image, output_height, output_width, object_cover, area_cover, bbox, thread_id)
else:
image = eval_image(image, output_height, output_width)
# Finally, rescale to [-1,1] instead of [0, 1)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
image = tf.reshape(image, shape=[output_height, output_width, 3])
return image
def preprocess_image(image, output_height, output_width, object_cover, area_cover, is_training=False, bbox=None):
return image_preprocessing(image, output_height, output_width, object_cover, area_cover, is_training, bbox)
| 7,279 | 41.080925 | 305 | py |
tencent-ml-images | tencent-ml-images-master/data_processing/dataset.py | """
Tencent is pleased to support the open source community by making Tencent ML-Images available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
"""Small library that points to a data set.
Methods of Data class:
data_files: Returns a python list of all (sharded) data set files.
reader: Return a reader for a single entry from the data set.
"""
import os
import tensorflow as tf
from datetime import datetime
class Dataset(object):
def __init__(self, data_dir, worker_hosts = [], task_id = 0, use_split = False, record_pattern='*tfrecords'):
"""Initialize dataset the path to the data."""
self.data_dir = data_dir
self.worker_hosts = worker_hosts
self.task_id = task_id
self.use_split = use_split
self.record_pattern = record_pattern
def data_filter(self, file_name):
idx = int(file_name.split('/')[-1].split('.tfrecords')[0])
return (idx % len(self.worker_hosts) == self.task_id)
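# Example (illustrative, not in the original file): with len(worker_hosts) == 4 and
# task_id == 1, a shard named "5.tfrecords" gives idx = 5 and 5 % 4 == 1, so this
# worker keeps the file; shards whose index maps to other workers are filtered out.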
def data_files(self):
"""Returns a python list of all (sharded) data files.
Returns:
python list of all (sharded) data set files.
Raises:
ValueError: if there are not data_files in the data dir
"""
tf_record_pattern = os.path.join(self.data_dir, self.record_pattern)
data_files = tf.gfile.Glob(tf_record_pattern)
data_files = filter(self.data_filter, data_files) if self.use_split else data_files
if not data_files:
print('No files found in data dir %s' % (self.data_dir))
exit(-1)
tf.logging.info('[%s] Worker[%d/%d] Files[%d] TrainDir[%s]' %
(datetime.now(), self.task_id, len(self.worker_hosts), len(data_files), self.data_dir))
return data_files
def reader(self):
"""Return a reader for a single entry from the data set.
See io_ops.py for details of Reader class.
Returns:
Reader object that reads the data set.
"""
return tf.TFRecordReader()
| 2,371 | 39.896552 | 305 | py |
tencent-ml-images | tencent-ml-images-master/data_processing/__init__.py | 0 | 0 | 0 | py |
|
tencent-ml-images | tencent-ml-images-master/data/download_urls_multithreading.py | #!/usr/bin/env python
"""
Tencent is pleased to support the open source community by making Tencent ML-Images available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import os
import sys
import urllib
import argparse
import threading,signal
import time
import socket
socket.setdefaulttimeout(10.0)
def downloadImg(start, end, url_list, save_dir):
global record,count,count_invalid,is_exit
im_names = []
with open(url_list, 'r') as url_f:
for line in url_f.readlines()[start:end]:
sp = line.rstrip('\n').split('\t')
url = sp[0]
url_list = url.split('/')
im_name = url_list[-2] + '_' + url_list[-1]
try:
urllib.urlretrieve(url, os.path.join(save_dir, im_name))
record += 1
im_file_Record.write(im_name + '\t' + '\t'.join(sp[1:]) + '\n')
print('url = {} is finished and {} imgs have been downloaded of all {} imgs'.format(url, record, count))
except IOError as e:
print ("The url:{} is ***INVALID***".format(url))
invalid_file.write(url + '\n')
count_invalid += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--url_list', type=str, help='the url list file')
parser.add_argument('--im_list', type=str, default='img.txt',help='the image list file')
parser.add_argument('--num_threads', type=int, default=8, help='the number of download threads')
parser.add_argument('--save_dir', type=str, default='./images', help='the directory to save images')
args = parser.parse_args()
url_list = args.url_list
im_list = args.im_list
num_threads = args.num_threads
save_dir = args.save_dir
# create savedir
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
count = 0 # the num of urls
count_invalid = 0 # the num of invalid urls
record = 0
with open(url_list,'r') as f:
for line in f:
count += 1
part = int(count/num_threads)
with open(im_list, 'w') as im_file_Record,open('invalid_url.txt','w') as invalid_file: # record the downloaded imgs
thread_list = []
for i in range(num_threads):
if(i == num_threads-1):
t = threading.Thread(target = downloadImg, kwargs={'start':i*part, 'end':count, 'url_list':url_list, 'save_dir':save_dir})
else:
t = threading.Thread(target = downloadImg, kwargs={'start':i*part, 'end':(i+1)*part, 'url_list':url_list, 'save_dir':save_dir})
t.setDaemon(True)
thread_list.append(t)
t.start()
for i in range(num_threads):
try:
while thread_list[i].isAlive():
pass
except KeyboardInterrupt:
break
if count_invalid==0:
print ("all {} imgs have been downloaded!".format(count))
else:
print("{}/{} imgs have been downloaded, {} URLs are invalid".format(count-count_invalid, count, count_invalid))
| 3,639 | 42.333333 | 207 | py |
tencent-ml-images | tencent-ml-images-master/data/tfrecord.py | #!/usr/bin/python
import sys
import os
import tensorflow as tf
import numpy as np
import imghdr
import threading
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-idx","--indexs", type=str, default="", help="dirs contains train index files")
parser.add_argument("-tfs", "--tfrecords", type=str, default="", help="dirs contains train tfrecords")
parser.add_argument("-im", "--images", type=str, default="", help="the path contains the raw images")
parser.add_argument("-cls", "--num_class", type=int, default=0, help="class label number")
parser.add_argument("-one", "--one_hot", type=bool, default=True, help="indicates the format of label fields in tfrecords")
parser.add_argument("-sidx", "--start_index", type=int, default=0, help="the start number of train tfrecord files")
args = parser.parse_args()
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
return (imghdr.what(filename)=='png')
def _is_jpeg(filename):
return (imghdr.what(filename)=='jpeg')
def _process_image(filename, coder):
"""Process a single image file."""
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
if not _is_jpeg(filename):
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
else:
try:
image = coder.decode_jpeg(image_data)
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
except:
print('Cannot convert type %s' % imghdr.what(filename))
return [], 0, 0
image = coder.decode_jpeg(image_data)
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _save_one(train_txt, tfrecord_name, label_num, one_hot):
writer = tf.python_io.TFRecordWriter(tfrecord_name)
with tf.Session() as sess:
coder = ImageCoder()
with open(train_txt, 'r') as lines:
for line in lines:
sp = line.rstrip("\n").split()
imgf = os.path.join(args.images, sp[0])
print(imgf)
img, height, width = _process_image(imgf, coder)
if height*width==0:
continue
if one_hot:
label = np.zeros([label_num,], dtype=np.float32)
for i in range(1, len(sp)):
if len(sp[i].split(":"))==2:
label[int(sp[i].split(":")[0])] = float(sp[i].split(":")[1])
else:
label[int(sp[i].split(":")[0])] = 1.0
example = tf.train.Example(features=tf.train.Features(feature={
'width': _int64_feature(width),
'height': _int64_feature(height),
'image': _bytes_feature(tf.compat.as_bytes(img)),
'label': _bytes_feature(tf.compat.as_bytes(label.tostring())),
'name': _bytes_feature(sp[0])
}))
writer.write(example.SerializeToString())
else:
label = int(sp[1])
example = tf.train.Example(features=tf.train.Features(feature={
'width': _int64_feature(width),
'height': _int64_feature(height),
'image': _bytes_feature(tf.compat.as_bytes(img)),
'label': _int64_feature(label),
'name': _bytes_feature(sp[0])
}))
writer.write(example.SerializeToString())
writer.close()
def _save():
files = os.listdir(args.indexs)
coord = tf.train.Coordinator()
threads = []
i = args.start_index
for idxf in files:
threads.append(
threading.Thread(target=_save_one,
args=(os.path.join(args.indexs, idxf),
os.path.join(args.tfrecords, str(i) + ".tfrecords"),
args.num_class, args.one_hot)
)
)
i = i+1
i=0
thread = []
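  # Launch the per-file conversion jobs in batches of 32 threads so that at
  # most 32 index files are converted concurrently; coord.join() waits for a
  # batch to finish before the next one starts.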
for t in threads:
if i==32:
for ct in thread:
ct.start()
coord.join(thread)
      i = 1  # count the thread that seeds the next batch below
      thread = [t]
else:
thread.append(t)
i += 1
for ct in thread:
ct.start()
coord.join(thread)
if __name__=='__main__':
_save()
| 5,799 | 33.117647 | 123 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/main.py | import sys
import argparse
import os
import random
import numpy
import pandas as pd
import torch
import torch.backends.cudnn as cudnn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import agents
import utils
numpy.set_printoptions(edgeitems=5, linewidth=160, formatter={'float': '{:0.6f}'.format})
torch.set_printoptions(edgeitems=5, precision=6, linewidth=160)
pd.options.display.float_format = '{:,.6f}'.format
pd.set_option('display.width', 160)
parser = argparse.ArgumentParser(description='Main')
parser.add_argument('-x', '--executions', default=1, type=int, metavar='N', help='Number of executions (default: 1)')
parser.add_argument('-w', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('-e', '--epochs', default=300, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('-bs', '--batch-size', default=64, type=int, metavar='N', help='mini-batch size (default: 64)')
parser.add_argument('-lr', '--original-learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('-lrdr', '--learning-rate-decay-rate', default=0.1, type=float, metavar='LRDR', help='learning rate decay rate')
parser.add_argument('-lrde', '--learning-rate-decay-epochs', default="150 200 250", metavar='LRDE', help='learning rate decay epochs')
parser.add_argument('-lrdp', '--learning-rate-decay-period', default=500, type=int, metavar='LRDP', help='learning rate decay period')
parser.add_argument('-mm', '--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('-wd', '--weight-decay', default=1*1e-4, type=float, metavar='W', help='weight decay (default: 1*1e-4)')
parser.add_argument('-pf', '--print-freq', default=1, type=int, metavar='N', help='print frequency (default: 1)')
parser.add_argument('-gpu', '--gpu-id', default='0', type=int, help='id for CUDA_VISIBLE_DEVICES')
parser.add_argument('-ei', '--exps-inputs', default="", type=str, metavar='PATHS', help='Inputs paths for the experiments')
parser.add_argument('-et', '--exps-types', default="", type=str, metavar='EXPERIMENTS', help='Experiments types to be performed')
parser.add_argument('-ec', '--exps-configs', default="", type=str, metavar='CONFIGS', help='Experiments configs to be used')
parser.add_argument('-sd', '--seed', default=42, type=int, metavar='N', help='Seed (default: 42)')
args = parser.parse_args()
args.exps_inputs = args.exps_inputs.split(":")
args.exps_types = args.exps_types.split(":")
args.exps_configs = args.exps_configs.split(":")
args.learning_rate_decay_epochs = [int(item) for item in args.learning_rate_decay_epochs.split()]
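# The experiment grid is given as colon-separated lists; each config string is a
# "+"-separated set of "key~value" tokens parsed in main() below. A sketch of an
# invocation (names and paths are examples only, not prescriptions):
#   python main.py -ei cifar10 -et train_classify \
#       -ec "data~cifar10+model~densenetbc100+loss~isomaxplus_no_no_no_final" -x 1 -gpu 0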
random.seed(args.seed)
numpy.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
print("seed", args.seed)
cudnn.benchmark = False
if args.executions == 1:
cudnn.deterministic = True
print("Deterministic!!!")
else:
cudnn.deterministic = False
print("No deterministic!!!")
torch.cuda.set_device(args.gpu_id)
print('\n__Python VERSION:', sys.version)
print('__pyTorch VERSION:', torch.__version__)
print('__Number CUDA Devices:', torch.cuda.device_count())
print('Active CUDA Device: GPU', torch.cuda.current_device())
def main():
print("\n\n\n\n\n\n")
print("***************************************************************")
print("***************************************************************")
print("***************************************************************")
print("***************************************************************")
for args.exp_input in args.exps_inputs:
for args.exp_type in args.exps_types:
for args.exp_config in args.exps_configs:
print("\n\n\n\n")
print("***************************************************************")
print("EXPERIMENT INPUT:", args.exp_input)
print("EXPERIMENT TYPE:", args.exp_type)
print("EXPERIMENT CONFIG:", args.exp_config)
args.experiment_path = os.path.join("experiments", args.exp_input, args.exp_type, args.exp_config)
if not os.path.exists(args.experiment_path):
os.makedirs(args.experiment_path)
print("EXPERIMENT PATH:", args.experiment_path)
args.executions_best_results_file_path = os.path.join(args.experiment_path, "results_best.csv")
args.executions_raw_results_file_path = os.path.join(args.experiment_path, "results_raw.csv")
for config in args.exp_config.split("+"):
config = config.split("~")
if config[0] == "data":
args.dataset = str(config[1])
print("DATASET:", args.dataset)
elif config[0] == "model":
args.model_name = str(config[1])
print("MODEL:", args.model_name)
elif config[0] == "loss":
args.loss = str(config[1])
print("LOSS:", args.loss)
args.number_of_model_classes = None
if args.dataset == "cifar10":
args.number_of_model_classes = args.number_of_model_classes if args.number_of_model_classes else 10
elif args.dataset == "cifar100":
args.number_of_model_classes = args.number_of_model_classes if args.number_of_model_classes else 100
elif args.dataset == "svhn":
args.number_of_model_classes = args.number_of_model_classes if args.number_of_model_classes else 10
print("***************************************************************")
for args.execution in range(1, args.executions + 1):
print("\n\n################ EXECUTION:", args.execution, "OF", args.executions, "################")
args.best_model_file_path = os.path.join(args.experiment_path, "model" + str(args.execution) + ".pth")
utils.save_dict_list_to_csv([vars(args)], args.experiment_path, args.exp_type+"_args")
print("\nARGUMENTS:", dict(utils.load_dict_list_from_csv(args.experiment_path, args.exp_type+"_args")[0]))
cnn_agent = agents.ClassifierAgent(args)
cnn_agent.train_classify()
experiment_results = pd.read_csv(os.path.join(os.path.join(args.experiment_path, "results_best.csv")))
print("\n################################\n", "EXPERIMENT RESULTS", "\n################################")
print(args.experiment_path)
print("\n", experiment_results.transpose())
if __name__ == '__main__':
main()
| 6,839 | 51.21374 | 134 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/example.py | # Code reused from: https://github.com/kuangliu/pytorch-cifar
import torch
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import net
import losses
import tools
from torchmetrics import AUROC
import random
import numpy
import torchnet as tnt
base_seed = 42
random.seed(base_seed)
numpy.random.seed(base_seed)
torch.manual_seed(base_seed)
torch.cuda.manual_seed(base_seed)
cudnn.benchmark = False
cudnn.deterministic = True
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 1 # start from epoch one
# Data
print('==> Preparing data...')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261)),])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261)),])
trainset = torchvision.datasets.CIFAR10(root='data/cifar10', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True, num_workers=4, worker_init_fn=lambda worker_id: random.seed(base_seed + worker_id))
testset = torchvision.datasets.CIFAR10(root='data/cifar10', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False, num_workers=4)
# Model
print('==> Building model...')
model = net.DenseNet3(100, 10)
model = model.to(device)
#############################################
#criterion = nn.CrossEntropyLoss()
criterion = losses.IsoMaxPlusLossSecondPart()
#############################################
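# Compared with a standard cross-entropy setup, the substantive changes are the
# criterion above (losses.IsoMaxPlusLossSecondPart instead of nn.CrossEntropyLoss)
# and the model's last layer (losses.IsoMaxPlusLossFirstPart instead of nn.Linear,
# see net.py); the SGD optimizer and schedule below follow the usual CIFAR recipe.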
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=1*1e-4)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[150, 200, 250], gamma=0.1)
def train(epoch):
print('Epoch: %d' % epoch)
model.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
tools.progress_bar(batch_idx, len(trainloader), 'Loss: %.4f | Acc: %.4f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test(epoch):
global best_acc
model.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
tools.progress_bar(batch_idx, len(testloader), 'Loss: %.4f | Acc: %.4f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
# Save checkpoint
acc = 100.*correct/total
if acc > best_acc:
print('Saving...')
state = {
'model': model.state_dict(),
'acc': acc,
'epoch': epoch,}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, 'checkpoint/ckpt.pth')
best_acc = acc
def detect(inloader, oodloader):
auroc = AUROC(pos_label=1)
auroctnt = tnt.meter.AUCMeter()
model.eval()
with torch.no_grad():
for _, (inputs, targets) in enumerate(inloader):
inputs, targets = inputs.to(device), targets.to(device)
targets.fill_(1)
outputs = model(inputs)
#probabilities = torch.nn.Softmax(dim=1)(outputs)
#score = probabilities.max(dim=1)[0] # this is the maximum probability score
#entropies = -(probabilities * torch.log(probabilities)).sum(dim=1)
#score = -entropies # this is the negative entropy score
# the negative entropy score is the best option for the IsoMax loss
# outputs are equal to logits, which in turn are equivalent to negative distances
score = outputs.max(dim=1)[0] # this is the minimum distance score
# the minimum distance score is the best option for the IsoMax+ loss
auroc.update(score, targets)
auroctnt.add(score, targets)
for _, (inputs, targets) in enumerate(oodloader):
inputs, targets = inputs.to(device), targets.to(device)
targets.fill_(0)
outputs = model(inputs)
#probabilities = torch.nn.Softmax(dim=1)(outputs)
#score = probabilities.max(dim=1)[0] # this is the maximum probability score
#entropies = -(probabilities * torch.log(probabilities)).sum(dim=1)
#score = -entropies # this is the negative entropy score
# the negative entropy score is the best option for the IsoMax loss
# outputs are equal to logits, which in turn are equivalent to negative distances
score = outputs.max(dim=1)[0] # this is the minimum distance score for detection
# the minimum distance score is the best option for the IsoMax+ loss
auroc.update(score, targets)
auroctnt.add(score, targets)
return auroc.compute(), auroctnt.value()[0]
total_epochs = 300
for epoch in range(start_epoch, start_epoch + total_epochs):
print()
for param_group in optimizer.param_groups:
print("LEARNING RATE: ", param_group["lr"])
train(epoch)
test(epoch)
scheduler.step()
checkpoint = torch.load('checkpoint/ckpt.pth')
model.load_state_dict(checkpoint['model'])
test_acc = checkpoint['acc']
print()
print("###################################################")
print("Test Accuracy (%): {0:.4f}".format(test_acc))
print("###################################################")
print()
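# Out-of-distribution detection evaluation: resized ImageNet, resized LSUN and
# SVHN are used as OOD sets against the CIFAR-10 test set (in-distribution),
# and AUROC is computed from the minimum distance score returned by detect().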
dataroot = os.path.expanduser(os.path.join('data', 'Imagenet_resize'))
oodset = torchvision.datasets.ImageFolder(dataroot, transform=transform_test)
oodloader = torch.utils.data.DataLoader(oodset, batch_size=64, shuffle=False, num_workers=4)
auroc = detect(testloader, oodloader)
print()
print("#################################################################################################################")
print("Detection performance for ImageNet Resize as Out-of-Distribution [AUROC] (%): {0:.4f}".format(100. * auroc[0].item()), auroc[1])
print("#################################################################################################################")
print()
dataroot = os.path.expanduser(os.path.join('data', 'LSUN_resize'))
oodset = torchvision.datasets.ImageFolder(dataroot, transform=transform_test)
oodloader = torch.utils.data.DataLoader(oodset, batch_size=64, shuffle=False, num_workers=4)
auroc = detect(testloader, oodloader)
print()
print("#################################################################################################################")
print("Detection performance for LSUN Resize as Out-of-Distribution [AUROC] (%): {0:.4f}".format(100. * auroc[0].item()), auroc[1])
print("#################################################################################################################")
print()
oodset = torchvision.datasets.SVHN(root='data/svhn', split="test", download=True, transform=transform_test)
oodloader = torch.utils.data.DataLoader(oodset, batch_size=64, shuffle=False, num_workers=4)
auroc = detect(testloader, oodloader)
print()
print("#################################################################################################################")
print("Detection performance for SVHN as Out-of-Distribution [AUROC] (%): {0:.4f}".format(100. * auroc[0].item()), auroc[1])
print("#################################################################################################################")
print()
| 8,369 | 41.923077 | 164 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/calculate_log.py | from __future__ import print_function
import numpy as np
def get_curve(dir_name, stypes=['Baseline', 'Gaussian_LDA']):
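    """Build TP/FP counts by sweeping a threshold over the scores.

    In-distribution scores are read from '{dir_name}/confidence_{stype}_In.txt'
    and out-of-distribution scores from '..._Out.txt'. The two sorted score
    lists are merged to trace the detection curve, and tnr_at_tpr95 records the
    true negative rate at the threshold where the true positive rate is 95%.
    """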
tp, fp = dict(), dict()
tnr_at_tpr95 = dict()
for stype in stypes:
known = np.loadtxt('{}/confidence_{}_In.txt'.format(dir_name, stype), delimiter='\n')
novel = np.loadtxt('{}/confidence_{}_Out.txt'.format(dir_name, stype), delimiter='\n')
known.sort()
novel.sort()
#end = np.max([np.max(known), np.max(novel)])
#start = np.min([np.min(known),np.min(novel)])
num_k = known.shape[0]
num_n = novel.shape[0]
tp[stype] = -np.ones([num_k+num_n+1], dtype=int)
fp[stype] = -np.ones([num_k+num_n+1], dtype=int)
tp[stype][0], fp[stype][0] = num_k, num_n
k, n = 0, 0
for l in range(num_k+num_n):
if k == num_k:
tp[stype][l+1:] = tp[stype][l]
fp[stype][l+1:] = np.arange(fp[stype][l]-1, -1, -1)
break
elif n == num_n:
tp[stype][l+1:] = np.arange(tp[stype][l]-1, -1, -1)
fp[stype][l+1:] = fp[stype][l]
break
else:
if novel[n] < known[k]:
n += 1
tp[stype][l+1] = tp[stype][l]
fp[stype][l+1] = fp[stype][l] - 1
else:
k += 1
tp[stype][l+1] = tp[stype][l] - 1
fp[stype][l+1] = fp[stype][l]
tpr95_pos = np.abs(tp[stype] / num_k - .95).argmin()
tnr_at_tpr95[stype] = 1. - fp[stype][tpr95_pos] / num_n
return tp, fp, tnr_at_tpr95
def metric(dir_name, stypes=['Bas', 'Gau'], verbose=False):
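    """Summarize detection performance from the curves built by get_curve().

    Returns, per score type: TNR (at 95% TPR), AUROC, DTACC (best detection
    accuracy over thresholds), and AUIN/AUOUT (area under the precision-recall
    curve treating in- and out-of-distribution as positive, respectively).
    """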
tp, fp, tnr_at_tpr95 = get_curve(dir_name, stypes)
results = dict()
mtypes = ['TNR', 'AUROC', 'DTACC', 'AUIN', 'AUOUT']
if verbose:
print(' ', end='')
for mtype in mtypes:
print(' {mtype:6s}'.format(mtype=mtype), end='')
print('')
for stype in stypes:
if verbose:
print('{stype:5s} '.format(stype=stype), end='')
results[stype] = dict()
# TNR
mtype = 'TNR'
results[stype][mtype] = tnr_at_tpr95[stype]
if verbose:
print(' {val:6.3f}'.format(val=100.*results[stype][mtype]), end='')
# AUROC
mtype = 'AUROC'
tpr = np.concatenate([[1.], tp[stype]/tp[stype][0], [0.]])
fpr = np.concatenate([[1.], fp[stype]/fp[stype][0], [0.]])
results[stype][mtype] = -np.trapz(1.-fpr, tpr)
if verbose:
print(' {val:6.3f}'.format(val=100.*results[stype][mtype]), end='')
# DTACC
mtype = 'DTACC'
results[stype][mtype] = .5 * (tp[stype]/tp[stype][0] + 1.-fp[stype]/fp[stype][0]).max()
if verbose:
print(' {val:6.3f}'.format(val=100.*results[stype][mtype]), end='')
# AUIN
mtype = 'AUIN'
denom = tp[stype]+fp[stype]
denom[denom == 0.] = -1.
pin_ind = np.concatenate([[True], denom > 0., [True]])
pin = np.concatenate([[.5], tp[stype]/denom, [0.]])
results[stype][mtype] = -np.trapz(pin[pin_ind], tpr[pin_ind])
if verbose:
print(' {val:6.3f}'.format(val=100.*results[stype][mtype]), end='')
# AUOUT
mtype = 'AUOUT'
denom = tp[stype][0]-tp[stype]+fp[stype][0]-fp[stype]
denom[denom == 0.] = -1.
pout_ind = np.concatenate([[True], denom > 0., [True]])
pout = np.concatenate([[0.], (fp[stype][0]-fp[stype])/denom, [.5]])
results[stype][mtype] = np.trapz(pout[pout_ind], 1.-fpr[pout_ind])
if verbose:
print(' {val:6.3f}'.format(val=100.*results[stype][mtype]), end='')
print('')
return results
| 3,803 | 39.042105 | 95 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/detect.py | from __future__ import print_function
import argparse
import torch
import models
import os
import losses
import data_loader
import calculate_log as callog
from torchvision import transforms
parser = argparse.ArgumentParser(description='PyTorch code: OOD detector')
parser.add_argument('--batch_size', type=int, default=64, metavar='N', help='batch size for data loader')
parser.add_argument('--dataset', required=True, help='cifar10 | cifar100 | svhn')
parser.add_argument('--dataroot', default='data', help='path to dataset')
parser.add_argument('--net_type', required=True, help='resnet | densenet')
parser.add_argument('--gpu', type=int, default=0, help='gpu index')
parser.add_argument('--loss', required=True, help='the loss used')
parser.add_argument('--dir', default="", type=str, help='Part of the dir to use')
parser.add_argument('-x', '--executions', default=1, type=int, metavar='N', help='Number of executions (default: 1)')
args = parser.parse_args()
print(args)
torch.cuda.set_device(args.gpu)
def main():
dir_path = os.path.join("experiments", args.dir, "train_classify", "data~"+args.dataset+"+model~"+args.net_type+"+loss~"+str(args.loss))
file_path = os.path.join(dir_path, "results_odd.csv")
with open(file_path, "w") as results_file:
results_file.write(
"EXECUTION,MODEL,IN-DATA,OUT-DATA,LOSS,AD-HOC,SCORE,INFER-LEARN,INFER-TRANS,"
"TNR,AUROC,DTACC,AUIN,AUOUT,CPU_FALSE,CPU_TRUE,GPU_FALSE,GPU_TRUE,TEMPERATURE,MAGNITUDE\n")
args_outf = os.path.join("temporary", args.dir, args.loss, args.net_type + '+' + args.dataset)
    if not os.path.isdir(args_outf):
os.makedirs(args_outf)
# define number of classes
if args.dataset == 'cifar100':
args.num_classes = 100
elif args.dataset == 'imagenet32':
args.num_classes = 1000
else:
args.num_classes = 10
if args.dataset == 'cifar10':
out_dist_list = ['svhn', 'imagenet_resize', 'lsun_resize']
elif args.dataset == 'cifar100':
out_dist_list = ['svhn', 'imagenet_resize', 'lsun_resize']
elif args.dataset == 'svhn':
out_dist_list = ['cifar10', 'imagenet_resize', 'lsun_resize']
if args.dataset == 'cifar10':
in_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261))])
elif args.dataset == 'cifar100':
in_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.507, 0.486, 0.440), (0.267, 0.256, 0.276))])
elif args.dataset == 'svhn':
in_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.437, 0.443, 0.472), (0.198, 0.201, 0.197))])
for args.execution in range(1, args.executions + 1):
print("EXECUTION:", args.execution)
pre_trained_net = os.path.join(dir_path, "model" + str(args.execution) + ".pth")
if args.loss.split("_")[0] == "softmax":
loss_first_part = losses.SoftMaxLossFirstPart
scores = ["ES"]
elif args.loss.split("_")[0] == "isomax":
loss_first_part = losses.IsoMaxLossFirstPart
scores = ["ES"]
elif args.loss.split("_")[0] == "isomaxplus":
loss_first_part = losses.IsoMaxPlusLossFirstPart
scores = ["MDS"]
# load networks
if args.net_type == 'densenetbc100':
model = models.DenseNet3(100, int(args.num_classes), loss_first_part=loss_first_part)
elif args.net_type == 'resnet110':
model = models.ResNet110(num_c=args.num_classes, loss_first_part=loss_first_part)
model.load_state_dict(torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu)))
model.cuda()
print('load model: ' + args.net_type)
# load dataset
print('load target valid data: ', args.dataset)
_, test_loader = data_loader.getTargetDataSet(args.dataset, args.batch_size, in_transform, args.dataroot)
for score in scores:
print("\n\n\n###############################")
print("###############################")
print("SCORE:", score)
print("###############################")
print("###############################")
base_line_list = []
get_scores(model, test_loader, args_outf, True, score)
out_count = 0
for out_dist in out_dist_list:
out_test_loader = data_loader.getNonTargetDataSet(out_dist, args.batch_size, in_transform, args.dataroot)
print('Out-distribution: ' + out_dist)
get_scores(model, out_test_loader, args_outf, False, score)
test_results = callog.metric(args_outf, ['PoT'])
base_line_list.append(test_results)
out_count += 1
# print the results
mtypes = ['TNR', 'AUROC', 'DTACC', 'AUIN', 'AUOUT']
print('Baseline method: train in_distribution: ' + args.dataset + '==========')
count_out = 0
for results in base_line_list:
print('out_distribution: '+ out_dist_list[count_out])
for mtype in mtypes:
print(' {mtype:6s}'.format(mtype=mtype), end='')
print('\n{val:6.2f}'.format(val=100.*results['PoT']['TNR']), end='')
print(' {val:6.2f}'.format(val=100.*results['PoT']['AUROC']), end='')
print(' {val:6.2f}'.format(val=100.*results['PoT']['DTACC']), end='')
print(' {val:6.2f}'.format(val=100.*results['PoT']['AUIN']), end='')
print(' {val:6.2f}\n'.format(val=100.*results['PoT']['AUOUT']), end='')
print('')
#Saving odd results:
with open(file_path, "a") as results_file:
results_file.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(
str(args.execution), args.net_type, args.dataset, out_dist_list[count_out],
str(args.loss), "NATIVE", score, 'NO', False,
'{:.2f}'.format(100.*results['PoT']['TNR']),
'{:.2f}'.format(100.*results['PoT']['AUROC']),
'{:.2f}'.format(100.*results['PoT']['DTACC']),
'{:.2f}'.format(100.*results['PoT']['AUIN']),
'{:.2f}'.format(100.*results['PoT']['AUOUT']),
0, 0, 0, 0, 1, 0))
count_out += 1
def get_scores(model, test_loader, outf, out_flag, score_type=None):
model.eval()
total = 0
    if out_flag:
temp_file_name_val = '%s/confidence_PoV_In.txt'%(outf)
temp_file_name_test = '%s/confidence_PoT_In.txt'%(outf)
else:
temp_file_name_val = '%s/confidence_PoV_Out.txt'%(outf)
temp_file_name_test = '%s/confidence_PoT_Out.txt'%(outf)
g = open(temp_file_name_val, 'w')
f = open(temp_file_name_test, 'w')
for data, _ in test_loader:
total += data.size(0)
data = data.cuda()
with torch.no_grad():
logits = model(data)
probabilities = torch.nn.Softmax(dim=1)(logits)
if score_type == "MPS": # the maximum probability score
soft_out = probabilities.max(dim=1)[0]
elif score_type == "ES": # the negative entropy score
soft_out = (probabilities * torch.log(probabilities)).sum(dim=1)
elif score_type == "MDS": # the minimum distance score
soft_out = logits.max(dim=1)[0]
for i in range(data.size(0)):
f.write("{}\n".format(soft_out[i]))
f.close()
g.close()
if __name__ == '__main__':
main()
| 7,841 | 44.593023 | 140 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/data_loader.py | import torch
from torchvision import datasets
import os
def getSVHN(batch_size, TF, data_root='data', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'svhn'))
kwargs.pop('input_size', None)
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.SVHN(root=data_root, split='train', download=True, transform=TF), batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.SVHN(root=data_root, split='test', download=True, transform=TF,), batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def getCIFAR10(batch_size, TF, data_root='data', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'cifar10'))
kwargs.pop('input_size', None)
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root=data_root, train=True, download=True, transform=TF), batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root=data_root, train=False, download=True, transform=TF), batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def getCIFAR100(batch_size, TF, data_root='data', TTF=None, train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'cifar100'))
kwargs.pop('input_size', None)
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root=data_root, train=True, download=True, transform=TF, target_transform=TTF), batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root=data_root, train=False, download=True, transform=TF, target_transform=TTF), batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def getTargetDataSet(data_type, batch_size, input_TF, dataroot):
if data_type == 'cifar10':
train_loader, test_loader = getCIFAR10(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'cifar100':
train_loader, test_loader = getCIFAR100(batch_size=batch_size, TF=input_TF, data_root=dataroot, TTF=None, num_workers=1)
elif data_type == 'svhn':
train_loader, test_loader = getSVHN(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
return train_loader, test_loader
def getNonTargetDataSet(data_type, batch_size, input_TF, dataroot):
if data_type == 'cifar10':
_, test_loader = getCIFAR10(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'svhn':
_, test_loader = getSVHN(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'cifar100':
_, test_loader = getCIFAR100(batch_size=batch_size, TF=input_TF, data_root=dataroot, TTF=lambda x: 0, num_workers=1)
elif data_type == 'imagenet_resize':
dataroot = os.path.expanduser(os.path.join(dataroot, 'Imagenet_resize'))
testsetout = datasets.ImageFolder(dataroot, transform=input_TF)
test_loader = torch.utils.data.DataLoader(testsetout, batch_size=batch_size, shuffle=False, num_workers=1)
elif data_type == 'lsun_resize':
dataroot = os.path.expanduser(os.path.join(dataroot, 'LSUN_resize'))
testsetout = datasets.ImageFolder(dataroot, transform=input_TF)
test_loader = torch.utils.data.DataLoader(testsetout, batch_size=batch_size, shuffle=False, num_workers=1)
return test_loader
| 3,956 | 46.674699 | 158 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/tools.py | # Code reused from: https://github.com/kuangliu/pytorch-cifar
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import os
import sys
import time
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
| 2,429 | 24.578947 | 68 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/net.py | # Code reused from: https://github.com/kuangliu/pytorch-cifar
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import losses
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
return torch.cat([x, out], 1)
class BottleneckBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BottleneckBlock, self).__init__()
inter_planes = out_planes * 4
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(inter_planes)
self.conv2 = nn.Conv2d(inter_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
out = self.conv2(self.relu(self.bn2(out)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return torch.cat([x, out], 1)
class TransitionBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(TransitionBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return F.avg_pool2d(out, 2)
class DenseBlock(nn.Module):
def __init__(self, nb_layers, in_planes, growth_rate, block, dropRate=0.0):
super(DenseBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, growth_rate, nb_layers, dropRate)
def _make_layer(self, block, in_planes, growth_rate, nb_layers, dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(block(in_planes + i * growth_rate, growth_rate, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class DenseNet3(nn.Module):
def __init__(self, depth, num_classes, growth_rate=12, reduction=0.5, bottleneck=True, dropRate=0.0):
super(DenseNet3, self).__init__()
in_planes = 2 * growth_rate
n = (depth - 4) / 3
if bottleneck == True:
n = n / 2
block = BottleneckBlock
else:
block = BasicBlock
# 1st conv before any dense block
self.conv1 = nn.Conv2d(3, in_planes, kernel_size=3, stride=1, padding=1, bias=False)
# 1st block
self.block1 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes + n * growth_rate)
self.trans1 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes * reduction))
# 2nd block
self.block2 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes + n * growth_rate)
self.trans2 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes * reduction))
# 3rd block
self.block3 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes + n * growth_rate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.in_planes = in_planes
########################################################################
#self.classifier = nn.Linear(in_planes, num_classes)
self.classifier = losses.IsoMaxPlusLossFirstPart(in_planes, num_classes)
########################################################################
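        # With IsoMaxPlusLossFirstPart the last layer outputs one logit per
        # class that behaves as a negative distance to a learnable class
        # prototype (see the score comments in example.py), so a higher logit
        # means the feature is closer to that class.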
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
#nn.init.kaiming_normal_(m.weight)
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
#elif isinstance(m, nn.Linear):
# nn.init.constant_(m.bias, 0)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.in_planes)
return self.classifier(out)
| 5,441 | 40.861538 | 107 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/analyze.py | import argparse
import os
import torch
import sys
import numpy as np
import pandas as pd
import random
pd.options.display.float_format = '{:,.4f}'.format
pd.set_option('display.width', 160)
parser = argparse.ArgumentParser(description='Analyze results in csv files')
parser.add_argument('-p', '--path', default="", type=str, help='Path for the experiments to be analyzed')
parser.set_defaults(argument=True)
random.seed(1234)
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)
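# This script only aggregates results: it walks the given experiments directory
# for results_raw.csv, results_best.csv and results_odd.csv files produced by
# main.py/detect.py and prints mean/std tables grouped by loss, score and
# out-of-distribution dataset.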
def main():
DATASETS = ['svhn', 'cifar10', 'cifar100']
MODELS = ['densenetbc100', 'resnet110']
LOSSES = ['softmax_no_no_no_final', 'isomax_no_no_no_final', 'isomaxplus_no_no_no_final',
]
print(DATASETS)
print(MODELS)
print(LOSSES)
args = parser.parse_args()
path = os.path.join("experiments", args.path)
if not os.path.exists(path):
sys.exit('You should pass a valid path to analyze!!!')
print("\n#####################################")
print("########## FINDING FILES ############")
print("#####################################")
list_of_files = []
file_names_dict_of_lists = {}
for (dir_path, dir_names, file_names) in os.walk(path):
for filename in file_names:
if filename.endswith('.csv') or filename.endswith('.npy') or filename.endswith('.pth'):
if filename not in file_names_dict_of_lists:
file_names_dict_of_lists[filename] = [os.path.join(dir_path, filename)]
else:
file_names_dict_of_lists[filename] += [os.path.join(dir_path, filename)]
list_of_files += [os.path.join(dir_path, filename)]
print()
for key in file_names_dict_of_lists:
print(key)
#print(file_names_dict_of_lists[key])
print("\n#####################################")
print("######## TABLE: RAW RESULTS #########")
print("#####################################")
data_frame_list = []
for file in file_names_dict_of_lists['results_raw.csv']:
data_frame_list.append(pd.read_csv(file))
raw_results_data_frame = pd.concat(data_frame_list)
print(raw_results_data_frame[:30])
print("\n#####################################")
print("###### TABLE: BEST ACCURACIES #######")
print("#####################################")
data_frame_list = []
for file in file_names_dict_of_lists['results_best.csv']:
data_frame_list.append(pd.read_csv(file))
best_results_data_frame = pd.concat(data_frame_list)
best_results_data_frame.to_csv(os.path.join(path, 'all_results_best.csv'), index=False)
for data in DATASETS:
for model in MODELS:
print("\n########")
print(data)
print(model)
df = best_results_data_frame.loc[
best_results_data_frame['DATA'].isin([data]) &
best_results_data_frame['MODEL'].isin([model])
]
df = df.rename(columns={'VALID MAX_PROBS MEAN': 'MAX_PROBS', 'VALID ENTROPIES MEAN': 'ENTROPIES',
'VALID INTRA_LOGITS MEAN': 'INTRA_LOGITS', 'VALID INTER_LOGITS MEAN': 'INTER_LOGITS'})
df = df.groupby(['LOSS'], as_index=False)[[
'TRAIN LOSS', 'TRAIN ACC1','VALID LOSS', 'VALID ACC1', 'ENTROPIES',
]].agg(['mean','std','count'])
df = df.sort_values([('VALID ACC1','mean')], ascending=False)
print(df)
print("########\n")
print("\n#####################################")
print("######## TABLE: ODD METRICS #########")
print("#####################################")
data_frame_list = []
for file in file_names_dict_of_lists['results_odd.csv']:
data_frame_list.append(pd.read_csv(file))
best_results_data_frame = pd.concat(data_frame_list)
best_results_data_frame.to_csv(os.path.join(path, 'all_results_odd.csv'), index=False)
for data in DATASETS:
for model in MODELS:
print("\n#########################################################################################################")
print("#########################################################################################################")
print("#########################################################################################################")
print("#########################################################################################################")
print(data)
print(model)
df = best_results_data_frame.loc[
best_results_data_frame['IN-DATA'].isin([data]) &
best_results_data_frame['MODEL'].isin([model]) &
best_results_data_frame['SCORE'].isin(["MPS","ES","MDS"]) &
best_results_data_frame['OUT-DATA'].isin(['svhn','lsun_resize','imagenet_resize','cifar10'])
]
df = df[['MODEL','IN-DATA','LOSS','SCORE','EXECUTION','OUT-DATA','TNR','AUROC','DTACC','AUIN','AUOUT']]
ndf = df.groupby(['LOSS','SCORE','OUT-DATA'], as_index=False)[['TNR','AUROC']].agg(['mean','std','count'])
#print(ndf)
#print()
ndf = df.groupby(['LOSS','SCORE','OUT-DATA']).agg(
mean_TNR=('TNR', 'mean'), std_TNR=('TNR', 'std'), count_TNR=('TNR', 'count'),
mean_AUROC=('AUROC', 'mean'), std_AUROC=('AUROC', 'std'), count_AUROC=('AUROC', 'count'))
#nndf = nndf.sort_values(['mean_AUROC'], ascending=False)
#print(nndf)
#print()
nndf = ndf.groupby(['LOSS','SCORE']).agg(
mean_mean_TNR=('mean_TNR', 'mean'), mean_std_TNR=('std_TNR', 'mean'), count_mean_TNR=('mean_TNR', 'count'),
mean_mean_AUROC=('mean_AUROC', 'mean'), mean_std_AUROC=('std_AUROC', 'mean'), count_mean_AUROC=('mean_AUROC', 'count'))
nndf = nndf.sort_values(['mean_mean_AUROC'], ascending=False)
print(nndf)
print()
if __name__ == '__main__':
main()
| 6,074 | 43.343066 | 135 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/loaders/image.py | import random
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
class ImageLoader:
def __init__(self, args):
self.args = args
self.mnist = False
if args.dataset == "cifar10":
self.normalize = transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261))
self.train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
self.normalize,])
self.inference_transform = transforms.Compose([
transforms.ToTensor(),
self.normalize,])
self.dataset_path = "data/cifar10"
self.trainset_for_train = torchvision.datasets.CIFAR10(
root=self.dataset_path, train=True, download=True, transform=self.train_transform)
self.trainset_for_infer = torchvision.datasets.CIFAR10(
root=self.dataset_path, train=True, download=True, transform=self.inference_transform)
self.val_set = torchvision.datasets.CIFAR10(
root=self.dataset_path, train=False, download=True, transform=self.inference_transform)
self.outlier_data = None
elif args.dataset == "cifar100":
self.normalize = transforms.Normalize((0.507, 0.486, 0.440), (0.267, 0.256, 0.276))
self.train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
self.normalize,])
self.inference_transform = transforms.Compose([
transforms.ToTensor(),
self.normalize,])
self.dataset_path = "data/cifar100"
self.trainset_for_train = torchvision.datasets.CIFAR100(
root=self.dataset_path, train=True, download=True, transform=self.train_transform)
self.trainset_for_infer = torchvision.datasets.CIFAR100(
root=self.dataset_path, train=True, download=True, transform=self.inference_transform)
self.val_set = torchvision.datasets.CIFAR100(
root=self.dataset_path, train=False, download=True, transform=self.inference_transform)
self.outlier_data = None
elif args.dataset == "svhn":
self.normalize = transforms.Normalize((0.437, 0.443, 0.472), (0.198, 0.201, 0.197))
self.train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
self.normalize,])
self.inference_transform = transforms.Compose([
transforms.ToTensor(),
self.normalize,])
self.dataset_path = "data/svhn"
self.trainset_for_train = torchvision.datasets.SVHN(
root=self.dataset_path, split="train", download=True, transform=self.train_transform)
self.trainset_for_infer = torchvision.datasets.SVHN(
root=self.dataset_path, split="train", download=True, transform=self.inference_transform)
self.val_set = torchvision.datasets.SVHN(
root=self.dataset_path, split="test", download=True, transform=self.inference_transform)
self.outlier_data = None
def get_loaders(self):
trainset_loader_for_train = DataLoader(
self.trainset_for_train, batch_size=self.args.batch_size, shuffle=True, num_workers=self.args.workers,
worker_init_fn=lambda worker_id: random.seed(self.args.seed + worker_id))
trainset_loader_for_infer = DataLoader(self.trainset_for_infer, batch_size=self.args.batch_size, shuffle=False, num_workers=self.args.workers,)
valset_loader = DataLoader(self.val_set, batch_size=self.args.batch_size, shuffle=False, num_workers=self.args.workers)
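        # Note: outlier_data is set to None for all datasets above, so the
        # outlier_loader below is only usable if an outlier dataset is assigned
        # elsewhere before iteration.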
outlier_loader = DataLoader(
self.outlier_data, batch_size=self.args.batch_size, shuffle=False, num_workers=self.args.workers,
worker_init_fn=lambda worker_id: random.seed(self.args.seed + worker_id))
return trainset_loader_for_train, trainset_loader_for_infer, valset_loader, outlier_loader
| 4,367 | 52.925926 | 151 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/loaders/__init__.py | from .image import *
| 21 | 10 | 20 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/models/densenet.py | # code reused from: https://github.com/kuangliu/pytorch-cifar
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
return torch.cat([x, out], 1)
class BottleneckBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BottleneckBlock, self).__init__()
inter_planes = out_planes * 4
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(inter_planes)
self.conv2 = nn.Conv2d(inter_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
out = self.conv2(self.relu(self.bn2(out)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return torch.cat([x, out], 1)
class TransitionBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(TransitionBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return F.avg_pool2d(out, 2)
class DenseBlock(nn.Module):
def __init__(self, nb_layers, in_planes, growth_rate, block, dropRate=0.0):
super(DenseBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, growth_rate, nb_layers, dropRate)
def _make_layer(self, block, in_planes, growth_rate, nb_layers, dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(block(in_planes + i * growth_rate, growth_rate, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class DenseNet3(nn.Module):
def __init__(self, depth, num_classes, growth_rate=12, reduction=0.5, bottleneck=True, dropRate=0.0, loss_first_part=None):
super(DenseNet3, self).__init__()
in_planes = 2 * growth_rate
n = (depth - 4) / 3
if bottleneck == True:
n = n / 2
block = BottleneckBlock
else:
block = BasicBlock
# 1st conv before any dense block
self.conv1 = nn.Conv2d(3, in_planes, kernel_size=3, stride=1, padding=1, bias=False)
# 1st block
self.block1 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes + n * growth_rate)
self.trans1 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes * reduction))
# 2nd block
self.block2 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes + n * growth_rate)
self.trans2 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes * reduction))
# 3rd block
self.block3 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes + n * growth_rate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.in_planes = in_planes
#########################################################
#self.classifier = nn.Linear(in_planes, num_classes)
self.classifier = loss_first_part(in_planes, num_classes)
#########################################################
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
#elif isinstance(m, nn.Linear):
# nn.init.constant_(m.bias, 0)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, out.size()[3])
out = out.view(-1, self.in_planes)
return self.classifier(out)
# function to extact the multiple features
def feature_list(self, x):
out_list = []
out = self.conv1(x)
out_list.append(out)
out = self.trans1(self.block1(out))
out_list.append(out)
out = self.trans2(self.block2(out))
out_list.append(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out_list.append(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(-1, self.in_planes)
return self.classifier(out), out_list
def intermediate_forward(self, x, layer_index):
out = self.conv1(x)
if layer_index == 0:
out = self.trans1(self.block1(out))
elif layer_index == 1:
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
elif layer_index == 2:
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
out = self.relu(self.bn1(out))
elif layer_index == 3:
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, out.size()[3])
return out
# function to extact the penultimate features
def penultimate_forward(self, x):
out = self.conv1(x)
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
penultimate = self.relu(self.bn1(out))
out = F.avg_pool2d(penultimate, out.size()[3])
out = out.view(-1, self.in_planes)
return self.classifier(out), penultimate
def logits_features(self, x):
out = self.conv1(x)
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, out.size()[3])
features = out.view(-1, self.in_planes)
logits = self.classifier(features)
return logits, features
| 7,490 | 39.274194 | 127 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/models/__init__.py | from .proper_resnet import *
from .densenet import *
| 53 | 17 | 28 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/models/proper_resnet.py | # code reused from: https://github.com/akamaster/pytorch_resnet_cifar10
'''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web are copy-paste from
torchvision's resnet and have the wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparison, etc.) have the following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4M
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in your work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch.nn as nn
import torch.nn.functional as F
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A'):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, loss_first_part=None):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
################
#self.linear = nn.Linear(64, num_classes)
self.classifier = loss_first_part(64 * block.expansion, num_classes)
################
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
#elif isinstance(m, nn.Linear):
# nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def logits_features(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
features = out.view(out.size(0), -1)
logits = self.classifier(features)
return logits, features
def feature_list(self, x):
out_list = []
out = F.relu(self.bn1(self.conv1(x)))
out_list.append(out)
out = self.layer1(out)
out_list.append(out)
out = self.layer2(out)
out_list.append(out)
out = self.layer3(out)
out_list.append(out)
out = F.avg_pool2d(out, out.size()[3])
out_list.append(out)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out, out_list
def intermediate_forward(self, x, layer_index):
out = F.relu(self.bn1(self.conv1(x)))
if layer_index == 1:
out = self.layer1(out)
if layer_index == 2:
out = self.layer1(out)
out = self.layer2(out)
if layer_index == 3:
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
if layer_index == 4:
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
return out
def ResNet32(num_c, loss_first_part=None):
return ResNet(BasicBlock, [5, 5, 5], num_classes=num_c, loss_first_part=loss_first_part)
def ResNet56(num_c, loss_first_part=None):
return ResNet(BasicBlock, [9, 9, 9], num_classes=num_c, loss_first_part=loss_first_part)
def ResNet110(num_c, loss_first_part=None):
return ResNet(BasicBlock, [18, 18, 18], num_classes=num_c, loss_first_part=loss_first_part)
| 6,110 | 35.375 | 130 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/agents/classifier.py | import os
import sys
import torch
import models
import loaders
import losses
import statistics
import math
import torchnet as tnt
import numpy as np
import utils
class ClassifierAgent:
def __init__(self, args):
self.args = args
self.epoch = None
# create dataset
image_loaders = loaders.ImageLoader(args)
self.trainset_loader_for_train, self.trainset_loader_for_infer, self.valset_loader, self.outlier_loader = image_loaders.get_loaders()
print("\nDATASET:", args.dataset)
if self.args.loss.split("_")[0] == "softmax":
loss_first_part = losses.SoftMaxLossFirstPart
loss_second_part = losses.SoftMaxLossSecondPart
elif self.args.loss.split("_")[0] == "isomax":
loss_first_part = losses.IsoMaxLossFirstPart
loss_second_part = losses.IsoMaxLossSecondPart
elif self.args.loss.split("_")[0] == "isomaxplus":
loss_first_part = losses.IsoMaxPlusLossFirstPart
loss_second_part = losses.IsoMaxPlusLossSecondPart
else:
sys.exit('You should pass a valid loss to use!!!')
# create model
print("=> creating model '{}'".format(self.args.model_name))
if self.args.model_name == "densenetbc100":
self.model = models.DenseNet3(100, int(self.args.number_of_model_classes), loss_first_part=loss_first_part)
elif self.args.model_name == "resnet110":
self.model = models.ResNet110(num_c=self.args.number_of_model_classes, loss_first_part=loss_first_part)
self.model.cuda()
# print and save model arch
print("\nMODEL:", self.model)
with open(os.path.join(self.args.experiment_path, 'model.arch'), 'w') as file:
print(self.model, file=file)
print("\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
utils.print_num_params(self.model)
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n")
# create loss
self.criterion = loss_second_part()
parameters = self.model.parameters()
self.optimizer = torch.optim.SGD(parameters, lr=self.args.original_learning_rate,momentum=self.args.momentum, nesterov=True, weight_decay=args.weight_decay)
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=self.args.learning_rate_decay_epochs, gamma=args.learning_rate_decay_rate)
print("\nTRAIN:", self.criterion, self.optimizer, self.scheduler)
def train_classify(self):
if self.args.execution == 1:
with open(self.args.executions_best_results_file_path, "w") as best_results:
best_results.write(
"DATA,MODEL,LOSS,EXECUTION,EPOCH,TRAIN LOSS,TRAIN ACC1,TRAIN SCALE,"
"TRAIN INTRA_LOGITS MEAN,TRAIN INTRA_LOGITS STD,TRAIN INTER_LOGITS MEAN,TRAIN INTER_LOGITS STD,"
"TRAIN MAX_PROBS MEAN,TRAIN MAX_PROBS STD,TRAIN ENTROPIES MEAN,TRAIN ENTROPIES STD,"
"VALID LOSS,VALID ACC1,VALID SCALE,"
"VALID INTRA_LOGITS MEAN,VALID INTRA_LOGITS STD,VALID INTER_LOGITS MEAN,VALID INTER_LOGITS STD,"
"VALID MAX_PROBS MEAN,VALID MAX_PROBS STD,VALID ENTROPIES MEAN,VALID ENTROPIES STD\n")
with open(self.args.executions_raw_results_file_path, "w") as raw_results:
raw_results.write("DATA,MODEL,LOSS,EXECUTION,EPOCH,SET,METRIC,VALUE\n")
print("\n################ TRAINING AND VALIDATING ################")
best_model_results = {"VALID ACC1": 0}
for self.epoch in range(1, self.args.epochs + 1):
print("\n######## EPOCH:", self.epoch, "OF", self.args.epochs, "########")
######################################################################################################
######################################################################################################
if self.epoch == 1:
if self.args.loss.split("_")[0] == "softmax": # The IsoMax loss variants do not require warm-up!!!
if self.args.model_name == 'resnet110' and self.args.dataset.startswith("cifar100"):
print("Starting warm up training!!!\n" * 10)
# for resnet110 original paper uses lr=0.01 for first 400 minibatches for warm-up
# then switch back. In this setup it will correspond for first epoch.
# Reference:
# Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
# Deep Residual Learning for Image Recognition. arXiv:1512.03385
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.args.original_learning_rate*0.1
######################################################################################################
######################################################################################################
for param_group in self.optimizer.param_groups:
print("\nLEARNING RATE:\t\t", param_group["lr"])
train_loss, train_acc1, train_scale, train_epoch_logits, train_epoch_metrics = self.train_epoch()
######################################################################################################
######################################################################################################
if self.epoch == 1:
if self.args.loss.split("_")[0] == "softmax": # The IsoMax loss variants do not require warm-up!!!
if self.args.model_name == 'resnet110' and self.args.dataset.startswith("cifar100"):
print("Finishing warm up training!!!\n" * 10)
# for resnet110 original paper uses lr=0.01 for first 400 minibatches for warm-up
# then switch back. In this setup it will correspond for first epoch.
# Reference:
# Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
# Deep Residual Learning for Image Recognition. arXiv:1512.03385
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.args.original_learning_rate
######################################################################################################
######################################################################################################
valid_loss, valid_acc1, valid_scale, valid_epoch_logits, valid_epoch_metrics = self.validate_epoch()
self.scheduler.step()
train_intra_logits_mean = statistics.mean(train_epoch_logits["intra"])
train_intra_logits_std = statistics.pstdev(train_epoch_logits["intra"])
train_inter_logits_mean = statistics.mean(train_epoch_logits["inter"])
train_inter_logits_std = statistics.pstdev(train_epoch_logits["inter"])
train_max_probs_mean = statistics.mean(train_epoch_metrics["max_probs"])
train_max_probs_std = statistics.pstdev(train_epoch_metrics["max_probs"])
train_entropies_mean = statistics.mean(train_epoch_metrics["entropies"])
train_entropies_std = statistics.pstdev(train_epoch_metrics["entropies"])
valid_intra_logits_mean = statistics.mean(valid_epoch_logits["intra"])
valid_intra_logits_std = statistics.pstdev(valid_epoch_logits["intra"])
valid_inter_logits_mean = statistics.mean(valid_epoch_logits["inter"])
valid_inter_logits_std = statistics.pstdev(valid_epoch_logits["inter"])
valid_max_probs_mean = statistics.mean(valid_epoch_metrics["max_probs"])
valid_max_probs_std = statistics.pstdev(valid_epoch_metrics["max_probs"])
valid_entropies_mean = statistics.mean(valid_epoch_metrics["entropies"])
valid_entropies_std = statistics.pstdev(valid_epoch_metrics["entropies"])
print("\n####################################################")
print("TRAIN MAX PROB MEAN:\t", train_max_probs_mean)
print("TRAIN MAX PROB STD:\t", train_max_probs_std)
print("VALID MAX PROB MEAN:\t", valid_max_probs_mean)
print("VALID MAX PROB STD:\t", valid_max_probs_std)
print("####################################################\n")
print("\n####################################################")
print("TRAIN ENTROPY MEAN:\t", train_entropies_mean)
print("TRAIN ENTROPY STD:\t", train_entropies_std)
print("VALID ENTROPY MEAN:\t", valid_entropies_mean)
print("VALID ENTROPY STD:\t", valid_entropies_std)
print("####################################################\n")
with open(self.args.executions_raw_results_file_path, "a") as raw_results:
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "LOSS", train_loss))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "ACC1", train_acc1))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "SCALE", train_scale))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "INTRA_LOGITS MEAN", train_intra_logits_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "INTRA_LOGITS STD", train_intra_logits_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "INTER_LOGITS MEAN", train_inter_logits_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "INTER_LOGITS STD", train_inter_logits_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "MAX_PROBS MEAN", train_max_probs_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "MAX_PROBS STD", train_max_probs_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "ENTROPIES MEAN", train_entropies_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "ENTROPIES STD", train_entropies_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "LOSS", valid_loss))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "ACC1", valid_acc1))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "SCALE", valid_scale))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "INTRA_LOGITS MEAN", valid_intra_logits_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "INTRA_LOGITS STD", valid_intra_logits_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "INTER_LOGITS MEAN", valid_inter_logits_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "INTER_LOGITS STD", valid_inter_logits_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "MAX_PROBS MEAN", valid_max_probs_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "MAX_PROBS STD", valid_max_probs_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "ENTROPIES MEAN", valid_entropies_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "ENTROPIES STD", valid_entropies_std))
print()
print("TRAIN ==>>\tIALM: {0:.8f}\tIALS: {1:.8f}\tIELM: {2:.8f}\tIELS: {3:.8f}".format(
train_intra_logits_mean, train_intra_logits_std, train_inter_logits_mean, train_inter_logits_std))
print("VALID ==>>\tIALM: {0:.8f}\tIALS: {1:.8f}\tIELM: {2:.8f}\tIELS: {3:.8f}".format(
valid_intra_logits_mean, valid_intra_logits_std, valid_inter_logits_mean, valid_inter_logits_std))
print()
print("\nDATA:", self.args.dataset)
print("MODEL:", self.args.model_name)
print("LOSS:", self.args.loss, "\n")
# if is best
if valid_acc1 > best_model_results["VALID ACC1"]:
print("!+NEW BEST MODEL VALID ACC1!")
best_model_results = {
"DATA": self.args.dataset,
"MODEL": self.args.model_name,
"LOSS": self.args.loss,
"EXECUTION": self.args.execution,
"EPOCH": self.epoch,
"TRAIN LOSS": train_loss,
"TRAIN ACC1": train_acc1,
"TRAIN SCALE": train_scale,
"TRAIN INTRA_LOGITS MEAN": train_intra_logits_mean,
"TRAIN INTRA_LOGITS STD": train_intra_logits_std,
"TRAIN INTER_LOGITS MEAN": train_inter_logits_mean,
"TRAIN INTER_LOGITS STD": train_inter_logits_std,
"TRAIN MAX_PROBS MEAN": train_max_probs_mean,
"TRAIN MAX_PROBS STD": train_max_probs_std,
"TRAIN ENTROPIES MEAN": train_entropies_mean,
"TRAIN ENTROPIES STD": train_entropies_std,
"VALID LOSS": valid_loss,
"VALID ACC1": valid_acc1,
"VALID SCALE": valid_scale,
"VALID INTRA_LOGITS MEAN": valid_intra_logits_mean,
"VALID INTRA_LOGITS STD": valid_intra_logits_std,
"VALID INTER_LOGITS MEAN": valid_inter_logits_mean,
"VALID INTER_LOGITS STD": valid_inter_logits_std,
"VALID MAX_PROBS MEAN": valid_max_probs_mean,
"VALID MAX_PROBS STD": valid_max_probs_std,
"VALID ENTROPIES MEAN": valid_entropies_mean,
"VALID ENTROPIES STD": valid_entropies_std,}
print("!+NEW BEST MODEL VALID ACC1:\t\t{0:.4f} IN EPOCH {1}! SAVING {2}\n".format(
valid_acc1, self.epoch, self.args.best_model_file_path))
torch.save(self.model.state_dict(), self.args.best_model_file_path)
np.save(os.path.join(
self.args.experiment_path, "best_model"+str(self.args.execution)+"_train_epoch_logits.npy"), train_epoch_logits)
np.save(os.path.join(
self.args.experiment_path, "best_model"+str(self.args.execution)+"_train_epoch_metrics.npy"), train_epoch_metrics)
np.save(os.path.join(
self.args.experiment_path, "best_model"+str(self.args.execution)+"_valid_epoch_logits.npy"), valid_epoch_logits)
np.save(os.path.join(
self.args.experiment_path, "best_model"+str(self.args.execution)+"_valid_epoch_metrics.npy"), valid_epoch_metrics)
print('!$$$$ BEST MODEL TRAIN ACC1:\t\t{0:.4f}'.format(best_model_results["TRAIN ACC1"]))
print('!$$$$ BEST MODEL VALID ACC1:\t\t{0:.4f}'.format(best_model_results["VALID ACC1"]))
with open(self.args.executions_best_results_file_path, "a") as best_results:
best_results.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(
best_model_results["DATA"],
best_model_results["MODEL"],
best_model_results["LOSS"],
best_model_results["EXECUTION"],
best_model_results["EPOCH"],
best_model_results["TRAIN LOSS"],
best_model_results["TRAIN ACC1"],
best_model_results["TRAIN SCALE"],
best_model_results["TRAIN INTRA_LOGITS MEAN"],
best_model_results["TRAIN INTRA_LOGITS STD"],
best_model_results["TRAIN INTER_LOGITS MEAN"],
best_model_results["TRAIN INTER_LOGITS STD"],
best_model_results["TRAIN MAX_PROBS MEAN"],
best_model_results["TRAIN MAX_PROBS STD"],
best_model_results["TRAIN ENTROPIES MEAN"],
best_model_results["TRAIN ENTROPIES STD"],
best_model_results["VALID LOSS"],
best_model_results["VALID ACC1"],
best_model_results["VALID SCALE"],
best_model_results["VALID INTRA_LOGITS MEAN"],
best_model_results["VALID INTRA_LOGITS STD"],
best_model_results["VALID INTER_LOGITS MEAN"],
best_model_results["VALID INTER_LOGITS STD"],
best_model_results["VALID MAX_PROBS MEAN"],
best_model_results["VALID MAX_PROBS STD"],
best_model_results["VALID ENTROPIES MEAN"],
best_model_results["VALID ENTROPIES STD"],))
print()
def train_epoch(self):
print()
# switch to train mode
self.model.train()
# Meters
loss_meter = utils.MeanMeter()
accuracy_meter = tnt.meter.ClassErrorMeter(topk=[1], accuracy=True)
epoch_logits = {"intra": [], "inter": []}
epoch_metrics = {"max_probs": [], "entropies": [], "max_logits": []}
batch_index = 0
for in_data in self.trainset_loader_for_train:
batch_index += 1
inputs = in_data[0].cuda()
targets = in_data[1].cuda(non_blocking=True)
outputs = self.model(inputs)
loss, scale, intra_logits, inter_logits = self.criterion(outputs, targets, debug=True)
max_logits = outputs.max(dim=1)[0]
probabilities = torch.nn.Softmax(dim=1)(outputs)
max_probs = probabilities.max(dim=1)[0]
entropies = utils.entropies_from_probabilities(probabilities)
loss_meter.add(loss.item(), targets.size(0))
accuracy_meter.add(outputs.detach(), targets.detach())
intra_logits = intra_logits.tolist()
inter_logits = inter_logits.tolist()
if self.args.number_of_model_classes > 100:
epoch_logits["intra"] = intra_logits
epoch_logits["inter"] = inter_logits
else:
epoch_logits["intra"] += intra_logits
epoch_logits["inter"] += inter_logits
epoch_metrics["max_probs"] += max_probs.tolist()
epoch_metrics["max_logits"] += max_logits.tolist()
epoch_metrics["entropies"] += (entropies/math.log(self.args.number_of_model_classes)).tolist() # normalized entropy!!!
# zero grads, compute gradients and do optimizer step
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if batch_index % self.args.print_freq == 0:
print('Train Epoch: [{0}][{1:3}/{2}]\t'
'Loss {loss:.8f}\t\t'
'Acc1 {acc1_meter:.2f}\t'
'IALM {intra_logits_mean:.4f}\t'
'IALS {intra_logits_std:.8f}\t\t'
'IELM {inter_logits_mean:.4f}\t'
'IELS {inter_logits_std:.8f}'
.format(self.epoch, batch_index, len(self.trainset_loader_for_train),
loss=loss_meter.avg,
acc1_meter=accuracy_meter.value()[0],
intra_logits_mean=statistics.mean(intra_logits),
intra_logits_std=statistics.stdev(intra_logits),
inter_logits_mean=statistics.mean(inter_logits),
inter_logits_std=statistics.stdev(inter_logits),))
print('\n#### TRAIN ACC1:\t{0:.4f}\n\n'.format(accuracy_meter.value()[0]))
return loss_meter.avg, accuracy_meter.value()[0], scale, epoch_logits, epoch_metrics
def validate_epoch(self):
print()
# switch to evaluate mode
self.model.eval()
# Meters
loss_meter = utils.MeanMeter()
accuracy_meter = tnt.meter.ClassErrorMeter(topk=[1], accuracy=True)
epoch_logits = {"intra": [], "inter": []}
epoch_metrics = {"max_probs": [], "entropies": [], "max_logits": []}
with torch.no_grad():
batch_index = 0
for in_data in self.valset_loader:
batch_index += 1
inputs = in_data[0].cuda()
targets = in_data[1].cuda(non_blocking=True)
outputs = self.model(inputs)
loss, scale, intra_logits, inter_logits = self.criterion(outputs, targets, debug=True)
max_logits = outputs.max(dim=1)[0]
probabilities = torch.nn.Softmax(dim=1)(outputs)
max_probs = probabilities.max(dim=1)[0]
entropies = utils.entropies_from_probabilities(probabilities)
loss_meter.add(loss.item(), inputs.size(0))
accuracy_meter.add(outputs.detach(), targets.detach())
intra_logits = intra_logits.tolist()
inter_logits = inter_logits.tolist()
if self.args.number_of_model_classes > 100:
epoch_logits["intra"] = intra_logits
epoch_logits["inter"] = inter_logits
else:
epoch_logits["intra"] += intra_logits
epoch_logits["inter"] += inter_logits
epoch_metrics["max_probs"] += max_probs.tolist()
epoch_metrics["max_logits"] += max_logits.tolist()
epoch_metrics["entropies"] += (entropies/math.log(self.args.number_of_model_classes)).tolist() # normalized entropy!!!
if batch_index % self.args.print_freq == 0:
print('Valid Epoch: [{0}][{1:3}/{2}]\t'
'Loss {loss:.8f}\t\t'
'Acc1 {acc1_meter:.2f}\t'
'IALM {intra_logits_mean:.4f}\t'
'IALS {intra_logits_std:.8f}\t\t'
'IELM {inter_logits_mean:.4f}\t'
'IELS {inter_logits_std:.8f}'
.format(self.epoch, batch_index, len(self.valset_loader),
loss=loss_meter.avg,
acc1_meter=accuracy_meter.value()[0],
intra_logits_mean=statistics.mean(intra_logits),
intra_logits_std=statistics.stdev(intra_logits),
inter_logits_mean=statistics.mean(inter_logits),
inter_logits_std=statistics.stdev(inter_logits),))
print('\n#### VALID ACC1:\t{0:.4f}\n\n'.format(accuracy_meter.value()[0]))
return loss_meter.avg, accuracy_meter.value()[0], scale, epoch_logits, epoch_metrics
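# --- Hedged sketch (not part of the original file): the normalized entropy logged above ---
# train_epoch and validate_epoch divide each entropy by log(C) so the stored values lie in
# [0, 1] regardless of the number of classes C. A standalone version of that computation,
# assuming `logits` is a (batch, C) float tensor:
def _normalized_entropy(logits):
    import math
    import torch
    probabilities = torch.nn.Softmax(dim=1)(logits)
    entropies = -(probabilities * torch.log(probabilities)).sum(dim=1)
    return entropies / math.log(logits.size(1))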
| 25,791 | 58.842227 | 164 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/agents/__init__.py | from .classifier import *
| 26 | 12.5 | 25 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/utils/procedures.py | import os
import pickle
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch
import torch.nn.functional as F
import csv
import numpy as np
from sklearn import metrics
def compute_weights(iterable):
return [sum(iterable) / (iterable[i] * len(iterable)) if iterable[i] != 0 else float("inf") for i in range(len(iterable))]
def print_format(iterable):
return ["{0:.8f}".format(i) if i is not float("inf") else "{0}".format(i) for i in iterable]
def probabilities(outputs):
return F.softmax(outputs, dim=1)
def max_probabilities(outputs):
return F.softmax(outputs, dim=1).max(dim=1)[0]
def predictions(outputs):
return outputs.argmax(dim=1)
def predictions_total(outputs):
return outputs.argmax(dim=1).bincount(minlength=outputs.size(1)).tolist()
def entropies(outputs):
probabilities_log_probabilities = F.softmax(outputs, dim=1) * F.log_softmax(outputs, dim=1)
return -1.0 * probabilities_log_probabilities.sum(dim=1)
def entropies_grads(outputs):
entropy_grads = - (1.0 + F.log_softmax(outputs, dim=1))
return entropy_grads.sum(dim=0).tolist()
def cross_entropies(outputs, targets):
return - 1.0 * F.log_softmax(outputs, dim=1)[range(outputs.size(0)), targets]
def cross_entropies_grads(outputs, targets):
cross_entropies_grads = [0 for i in range(outputs.size(1))]
for i in range(len(predictions(outputs))):
cross_entropies_grads[predictions(outputs)[i]] += - (1.0 - (F.softmax(outputs, dim=1)[i, targets[i]].item()))
return cross_entropies_grads
def make_equitable(outputs, criterion, weights):
weights = torch.Tensor(weights).cuda()
weights.requires_grad = False
return weights[predictions(outputs)] * criterion[range(outputs.size(0))]
def entropies_from_logits(logits):
return -(F.softmax(logits, dim=1) * F.log_softmax(logits, dim=1)).sum(dim=1)
def entropies_from_probabilities(probabilities):
if len(probabilities.size()) == 2:
return -(probabilities * torch.log(probabilities)).sum(dim=1)
elif len(probabilities.size()) == 3:
return -(probabilities * torch.log(probabilities)).sum(dim=2).mean(dim=1)
def save_object(object, path, file):
with open(os.path.join(path, file + '.pkl'), 'wb') as f:
pickle.dump(object, f, pickle.HIGHEST_PROTOCOL)
def load_object(path, file):
with open(os.path.join(path, file + '.pkl'), 'rb') as f:
return pickle.load(f)
def save_dict_list_to_csv(dict_list, path, file):
with open(os.path.join(path, file + '.csv'), 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=dict_list[0].keys())
writer.writeheader()
for dict in dict_list:
writer.writerow(dict)
def load_dict_list_from_csv(path, file):
dict_list = []
with open(os.path.join(path, file + '.csv'), 'r') as csvfile:
reader = csv.DictReader(csvfile)
for dict in reader:
dict_list.append(dict)
return dict_list
class MeanMeter(object):
"""computes and stores the current averaged current mean"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def add(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def purity(y_true, y_pred):
"""compute contingency matrix (also called confusion matrix)"""
contingency_matrix = metrics.cluster.contingency_matrix(y_true, y_pred)
return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix)
def asinh(x):
return torch.log(x+(x**2+1)**0.5)
def acosh(x):
return torch.log(x+(x**2-1)**0.5)
def atanh(x):
return 0.5*torch.log(((1+x)/((1-x)+0.000001))+0.000001)
def sinh(x):
return (torch.exp(x)-torch.exp(-x))/2
def cosine_similarity(features, prototypes):
return F.cosine_similarity(features.unsqueeze(2), prototypes.t().unsqueeze(0), dim=1, eps=1e-6)
def mahalanobis_distances(features, prototypes, precisions):
diff = features.unsqueeze(2) - prototypes.t().unsqueeze(0)
diff2 = features.t().unsqueeze(0) - prototypes.unsqueeze(2)
precision_diff = torch.matmul(precisions.unsqueeze(0), diff)
extended_product = torch.matmul(diff2.permute(2, 0, 1), precision_diff)
mahalanobis_square = torch.diagonal(extended_product, offset=0, dim1=1, dim2=2)
mahalanobis = torch.sqrt(mahalanobis_square)
return mahalanobis
def multiprecisions_mahalanobis_distances(features, prototypes, multiprecisions):
mahalanobis_square = torch.Tensor(features.size(0), prototypes.size(0)).cuda()
for prototype in range(prototypes.size(0)):
diff = features - prototypes[prototype]
        # note: the unsqueeze operations are applied inline in the matmul calls below
precision_diff = torch.matmul(multiprecisions.unsqueeze(0), diff.unsqueeze(2))
product = torch.matmul(diff.unsqueeze(1), precision_diff).squeeze()
mahalanobis_square[:, prototype] = product
mahalanobis = torch.sqrt(mahalanobis_square)
return mahalanobis
def rand_bbox(size, lam):
W = size[2]
H = size[3]
#print("calling randbox")
cut_rat = np.sqrt(1. - lam)
    cut_w = int(W * cut_rat)  # plain int: np.int was removed in recent NumPy releases
    cut_h = int(H * cut_rat)
"""
#print("calling randbox")
r = 0.5 + np.random.rand(1)/2
s = 0.5/r
if np.random.rand(1) < 0.5:
r, s = s, r
#print(r)
#print(s)
#print(r * s)
cut_w = np.int(W * r)
cut_h = np.int(H * s)
"""
#cx = np.random.randint(W)
#cy = np.random.randint(H)
cx = np.random.randint(cut_w // 2, high=W - cut_w // 2)
cy = np.random.randint(cut_h // 2, high=H - cut_h // 2)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
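# --- Hedged sketch (not part of the original file): typical CutMix use of rand_bbox ---
# rand_bbox follows the CutMix recipe: sample lam, cut a box whose area fraction is roughly
# (1 - lam), paste that box from a shuffled copy of the batch, then correct lam by the exact
# pasted area. The Beta sampling and the alpha value below are assumptions.
def example_cutmix(inputs, targets, alpha=1.0):
    lam = np.random.beta(alpha, alpha)
    index = torch.randperm(inputs.size(0), device=inputs.device)
    bbx1, bby1, bbx2, bby2 = rand_bbox(inputs.size(), lam)
    mixed = inputs.clone()
    mixed[:, :, bbx1:bbx2, bby1:bby2] = inputs[index, :, bbx1:bbx2, bby1:bby2]
    lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (inputs.size(-1) * inputs.size(-2)))
    return mixed, targets, targets[index], lam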
def print_num_params(model, display_all_modules=False):
total_num_params = 0
for n, p in model.named_parameters():
num_params = 1
for s in p.shape:
num_params *= s
if display_all_modules: print("{}: {}".format(n, num_params))
total_num_params += num_params
print("total number of parameters: {:.2e}".format(total_num_params))
| 6,271 | 29.595122 | 126 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/utils/__init__.py | from .procedures import *
| 26 | 12.5 | 25 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/losses/softmax.py | import torch.nn as nn
import torch
import math
class SoftMaxLossFirstPart(nn.Module):
def __init__(self, num_features, num_classes, temperature=1.0):
super(SoftMaxLossFirstPart, self).__init__()
self.num_features = num_features
self.num_classes = num_classes
self.temperature = temperature
self.weights = nn.Parameter(torch.Tensor(num_classes, num_features))
self.bias = nn.Parameter(torch.Tensor(num_classes))
nn.init.uniform_(self.weights, a=-math.sqrt(1.0/self.num_features), b=math.sqrt(1.0/self.num_features))
nn.init.zeros_(self.bias)
def forward(self, features):
logits = features.matmul(self.weights.t()) + self.bias
# The temperature may be calibrated after training to improve uncertainty estimation.
return logits / self.temperature
class SoftMaxLossSecondPart(nn.Module):
def __init__(self):
super(SoftMaxLossSecondPart, self).__init__()
self.loss = nn.CrossEntropyLoss()
def forward(self, logits, targets, debug=False):
loss = self.loss(logits, targets)
if not debug:
return loss
else:
targets_one_hot = torch.eye(logits.size(1))[targets].long().cuda()
intra_inter_logits = torch.where(targets_one_hot != 0, logits, torch.Tensor([float('Inf')]).cuda())
inter_intra_logits = torch.where(targets_one_hot != 0, torch.Tensor([float('Inf')]).cuda(), logits)
intra_logits = intra_inter_logits[intra_inter_logits != float('Inf')]
inter_logits = inter_intra_logits[inter_intra_logits != float('Inf')]
return loss, 1.0, intra_logits, inter_logits
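# --- Hedged sketch (not part of the original file): post-hoc temperature calibration ---
# The `temperature` attribute of SoftMaxLossFirstPart divides the logits, so it can be tuned
# after training without touching the weights. The grid, the NLL criterion, and the
# assumption that `logits` were produced with temperature=1.0 are all choices made here.
def _calibrate_temperature(classifier, logits, targets, grid=(0.5, 1.0, 2.0, 5.0, 10.0)):
    best_t, best_nll = 1.0, float("inf")
    for t in grid:
        nll = nn.functional.cross_entropy(logits / t, targets).item()
        if nll < best_nll:
            best_t, best_nll = t, nll
    classifier.temperature = best_t
    return best_t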
| 1,694 | 41.375 | 111 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/losses/isomaxplus.py | import torch.nn as nn
import torch.nn.functional as F
import torch
class IsoMaxPlusLossFirstPart(nn.Module):
"""This part replaces the model classifier output layer nn.Linear()"""
def __init__(self, num_features, num_classes, temperature=1.0):
super(IsoMaxPlusLossFirstPart, self).__init__()
self.num_features = num_features
self.num_classes = num_classes
self.temperature = temperature
self.prototypes = nn.Parameter(torch.Tensor(num_classes, num_features))
self.distance_scale = nn.Parameter(torch.Tensor(1))
nn.init.normal_(self.prototypes, mean=0.0, std=1.0)
nn.init.constant_(self.distance_scale, 1.0)
def forward(self, features):
distances = torch.abs(self.distance_scale) * torch.cdist(F.normalize(features), F.normalize(self.prototypes), p=2.0, compute_mode="donot_use_mm_for_euclid_dist")
logits = -distances
# The temperature may be calibrated after training to improve uncertainty estimation.
return logits / self.temperature
class IsoMaxPlusLossSecondPart(nn.Module):
"""This part replaces the nn.CrossEntropyLoss()"""
def __init__(self, entropic_scale=10.0):
super(IsoMaxPlusLossSecondPart, self).__init__()
self.entropic_scale = entropic_scale
def forward(self, logits, targets, debug=False):
#############################################################################
#############################################################################
"""Probabilities and logarithms are calculated separately and sequentially"""
"""Therefore, nn.CrossEntropyLoss() must not be used to calculate the loss"""
#############################################################################
#############################################################################
distances = -logits
probabilities_for_training = nn.Softmax(dim=1)(-self.entropic_scale * distances)
probabilities_at_targets = probabilities_for_training[range(distances.size(0)), targets]
loss = -torch.log(probabilities_at_targets).mean()
if not debug:
return loss
else:
targets_one_hot = torch.eye(distances.size(1))[targets].long().cuda()
intra_inter_distances = torch.where(targets_one_hot != 0, distances, torch.Tensor([float('Inf')]).cuda())
inter_intra_distances = torch.where(targets_one_hot != 0, torch.Tensor([float('Inf')]).cuda(), distances)
intra_distances = intra_inter_distances[intra_inter_distances != float('Inf')]
inter_distances = inter_intra_distances[inter_intra_distances != float('Inf')]
return loss, 1.0, intra_distances, inter_distances
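# --- Hedged usage sketch (not part of the original file) ---
# The two classes above are meant to be used together: the first part replaces the model's
# output nn.Linear layer, the second replaces nn.CrossEntropyLoss. The feature dimension,
# class count, and batch below are placeholders; debug=True additionally expects CUDA
# tensors as written.
def _example_isomaxplus():
    head = IsoMaxPlusLossFirstPart(num_features=64, num_classes=10)
    criterion = IsoMaxPlusLossSecondPart(entropic_scale=10.0)
    features = torch.randn(8, 64)        # penultimate features from some backbone
    targets = torch.randint(0, 10, (8,))
    logits = head(features)
    loss = criterion(logits, targets)    # debug=False, so this also runs on CPU
    return loss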
| 2,771 | 52.307692 | 169 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/losses/__init__.py | from .softmax import *
from .isomax import *
from .isomaxplus import *
| 71 | 17 | 25 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/losses/isomax.py | import torch.nn as nn
import torch.nn.functional as F
import torch
class IsoMaxLossFirstPart(nn.Module):
"""This part replaces the model classifier output layer nn.Linear()"""
def __init__(self, num_features, num_classes, temperature=1.0):
super(IsoMaxLossFirstPart, self).__init__()
self.num_features = num_features
self.num_classes = num_classes
self.temperature = temperature
self.prototypes = nn.Parameter(torch.Tensor(num_classes, num_features))
nn.init.constant_(self.prototypes, 0.0)
def forward(self, features):
distances = torch.cdist(features, self.prototypes, p=2.0, compute_mode="donot_use_mm_for_euclid_dist")
logits = -distances
# The temperature may be calibrated after training to improve uncertainty estimation.
return logits / self.temperature
class IsoMaxLossSecondPart(nn.Module):
"""This part replaces the nn.CrossEntropyLoss()"""
def __init__(self, entropic_scale=10.0):
super(IsoMaxLossSecondPart, self).__init__()
self.entropic_scale = entropic_scale
def forward(self, logits, targets, debug=False):
#############################################################################
#############################################################################
"""Probabilities and logarithms are calculated separately and sequentially"""
"""Therefore, nn.CrossEntropyLoss() must not be used to calculate the loss"""
#############################################################################
#############################################################################
distances = -logits
probabilities_for_training = nn.Softmax(dim=1)(-self.entropic_scale * distances)
probabilities_at_targets = probabilities_for_training[range(distances.size(0)), targets]
loss = -torch.log(probabilities_at_targets).mean()
if not debug:
return loss
else:
targets_one_hot = torch.eye(distances.size(1))[targets].long().cuda()
intra_inter_distances = torch.where(targets_one_hot != 0, distances, torch.Tensor([float('Inf')]).cuda())
inter_intra_distances = torch.where(targets_one_hot != 0, torch.Tensor([float('Inf')]).cuda(), distances)
intra_distances = intra_inter_distances[intra_inter_distances != float('Inf')]
inter_distances = inter_intra_distances[inter_intra_distances != float('Inf')]
return loss, 1.0, intra_distances, inter_distances
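# --- Hedged sketch (not part of the original file): out-of-distribution scores at test time ---
# After training with the IsoMax head above, detection scores can be read directly from the
# logits. Two common choices in this line of work are the maximum softmax probability and the
# negative entropy ("entropic score"); which one a given experiment reports is not restated here.
def _example_ood_scores(logits):
    probabilities = nn.Softmax(dim=1)(logits)
    max_prob_score = probabilities.max(dim=1)[0]
    entropic_score = (probabilities * torch.log(probabilities)).sum(dim=1)  # negative entropy
    return max_prob_score, entropic_score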
| 2,571 | 50.44 | 117 | py |
stable-continual-learning | stable-continual-learning-master/__init__.py | 0 | 0 | 0 | py |
|
stable-continual-learning | stable-continual-learning-master/stable_sgd/main.py | import os
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
from stable_sgd.models import MLP, ResNet18
from stable_sgd.data_utils import get_permuted_mnist_tasks, get_rotated_mnist_tasks, get_split_cifar100_tasks
from stable_sgd.utils import parse_arguments, DEVICE, init_experiment, end_experiment, log_metrics, log_hessian, save_checkpoint
def train_single_epoch(net, optimizer, loader, criterion, task_id=None):
"""
Train the model for a single epoch
:param net:
:param optimizer:
:param loader:
:param criterion:
:param task_id:
:return:
"""
net = net.to(DEVICE)
net.train()
for batch_idx, (data, target) in enumerate(loader):
data = data.to(DEVICE)
target = target.to(DEVICE)
optimizer.zero_grad()
if task_id:
pred = net(data, task_id)
else:
pred = net(data)
loss = criterion(pred, target)
loss.backward()
optimizer.step()
return net
def eval_single_epoch(net, loader, criterion, task_id=None):
"""
Evaluate the model for single epoch
:param net:
:param loader:
:param criterion:
:param task_id:
:return:
"""
net = net.to(DEVICE)
net.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in loader:
data = data.to(DEVICE)
target = target.to(DEVICE)
# for cifar head
if task_id is not None:
output = net(data, task_id)
else:
output = net(data)
test_loss += criterion(output, target).item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(loader.dataset)
correct = correct.to('cpu')
avg_acc = 100.0 * float(correct.numpy()) / len(loader.dataset)
return {'accuracy': avg_acc, 'loss': test_loss}
def get_benchmark_data_loader(args):
"""
Returns the benchmark loader which could be either of these:
get_split_cifar100_tasks, get_permuted_mnist_tasks, or get_rotated_mnist_tasks
:param args:
:return: a function which when called, returns all tasks
"""
if args.dataset == 'perm-mnist' or args.dataset == 'permuted-mnist':
return get_permuted_mnist_tasks
elif args.dataset == 'rot-mnist' or args.dataset == 'rotation-mnist':
return get_rotated_mnist_tasks
elif args.dataset == 'cifar-100' or args.dataset == 'cifar100':
return get_split_cifar100_tasks
else:
raise Exception("Unknown dataset.\n"+
"The code supports 'perm-mnist, rot-mnist, and cifar-100.")
def get_benchmark_model(args):
"""
Return the corresponding PyTorch model for experiment
:param args:
:return:
"""
if 'mnist' in args.dataset:
if args.tasks == 20 and args.hiddens < 256:
print("Warning! the main paper MLP with 256 neurons for experiment with 20 tasks")
return MLP(args.hiddens, {'dropout': args.dropout}).to(DEVICE)
elif 'cifar' in args.dataset:
return ResNet18(config={'dropout': args.dropout}).to(DEVICE)
else:
raise Exception("Unknown dataset.\n"+
"The code supports 'perm-mnist, rot-mnist, and cifar-100.")
def run(args):
"""
Run a single run of experiment.
:param args: please see `utils.py` for arguments and options
"""
# init experiment
acc_db, loss_db, hessian_eig_db = init_experiment(args)
# load benchmarks and model
print("Loading {} tasks for {}".format(args.tasks, args.dataset))
tasks = get_benchmark_data_loader(args)(args.tasks, args.batch_size)
print("loaded all tasks!")
model = get_benchmark_model(args)
# criterion
criterion = nn.CrossEntropyLoss().to(DEVICE)
time = 0
for current_task_id in range(1, args.tasks+1):
print("================== TASK {} / {} =================".format(current_task_id, args.tasks))
train_loader = tasks[current_task_id]['train']
lr = max(args.lr * args.gamma ** (current_task_id), 0.00005)
for epoch in range(1, args.epochs_per_task+1):
# 1. train and save
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.8)
train_single_epoch(model, optimizer, train_loader, criterion, current_task_id)
time += 1
# 2. evaluate on all tasks up to now, including the current task
for prev_task_id in range(1, current_task_id+1):
# 2.0. only evaluate once a task is finished
if epoch == args.epochs_per_task:
model = model.to(DEVICE)
val_loader = tasks[prev_task_id]['test']
# 2.1. compute accuracy and loss
metrics = eval_single_epoch(model, val_loader, criterion, prev_task_id)
acc_db, loss_db = log_metrics(metrics, time, prev_task_id, acc_db, loss_db)
# 2.2. (optional) compute eigenvalues and eigenvectors of Loss Hessian
if prev_task_id == current_task_id and args.compute_eigenspectrum:
hessian_eig_db = log_hessian(model, val_loader, time, prev_task_id, hessian_eig_db)
# 2.3. save model parameters
save_checkpoint(model, time)
end_experiment(args, acc_db, loss_db, hessian_eig_db)
if __name__ == "__main__":
args = parse_arguments()
run(args) | 4,867 | 29.425 | 128 | py |
stable-continual-learning | stable-continual-learning-master/stable_sgd/utils.py | import uuid
import torch
import argparse
import matplotlib
import numpy as np
import pandas as pd
matplotlib.use('Agg')
import seaborn as sns
from pathlib import Path
import matplotlib.pyplot as plt
from external_libs.hessian_eigenthings import compute_hessian_eigenthings
TRIAL_ID = uuid.uuid4().hex.upper()[0:6]
EXPERIMENT_DIRECTORY = './outputs/{}'.format(TRIAL_ID)
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def parse_arguments():
parser = argparse.ArgumentParser(description='Argument parser')
parser.add_argument('--tasks', default=5, type=int, help='total number of tasks')
parser.add_argument('--epochs-per-task', default=1, type=int, help='epochs per task')
parser.add_argument('--dataset', default='rot-mnist', type=str, help='dataset. options: rot-mnist, perm-mnist, cifar100')
parser.add_argument('--batch-size', default=10, type=int, help='batch-size')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--gamma', default=0.4, type=float, help='learning rate decay. Use 1.0 for no decay')
parser.add_argument('--dropout', default=0.25, type=float, help='dropout probability. Use 0.0 for no dropout')
parser.add_argument('--hiddens', default=256, type=int, help='num of hidden neurons in each layer of a 2-layer MLP')
parser.add_argument('--compute-eigenspectrum', default=False, type=bool, help='compute eigenvalues/eigenvectors?')
parser.add_argument('--seed', default=1234, type=int, help='random seed')
args = parser.parse_args()
return args
def init_experiment(args):
print('------------------- Experiment started -----------------')
print(f"Parameters:\n seed={args.seed}\n benchmark={args.dataset}\n num_tasks={args.tasks}\n "+
f"epochs_per_task={args.epochs_per_task}\n batch_size={args.batch_size}\n "+
f"learning_rate={args.lr}\n learning rate decay(gamma)={args.gamma}\n dropout prob={args.dropout}\n")
# 1. setup seed for reproducibility
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. create directory to save results
Path(EXPERIMENT_DIRECTORY).mkdir(parents=True, exist_ok=True)
print("The results will be saved in {}\n".format(EXPERIMENT_DIRECTORY))
# 3. create data structures to store metrics
loss_db = {t: [0 for i in range(args.tasks*args.epochs_per_task)] for t in range(1, args.tasks+1)}
acc_db = {t: [0 for i in range(args.tasks*args.epochs_per_task)] for t in range(1, args.tasks+1)}
hessian_eig_db = {}
return acc_db, loss_db, hessian_eig_db
def end_experiment(args, acc_db, loss_db, hessian_eig_db):
# 1. save all metrics into csv file
acc_df = pd.DataFrame(acc_db)
acc_df.to_csv(EXPERIMENT_DIRECTORY+'/accs.csv')
visualize_result(acc_df, EXPERIMENT_DIRECTORY+'/accs.png')
loss_df = pd.DataFrame(loss_db)
loss_df.to_csv(EXPERIMENT_DIRECTORY+'/loss.csv')
visualize_result(loss_df, EXPERIMENT_DIRECTORY+'/loss.png')
hessian_df = pd.DataFrame(hessian_eig_db)
hessian_df.to_csv(EXPERIMENT_DIRECTORY+'/hessian_eigs.csv')
# 2. calculate average accuracy and forgetting (c.f. ``evaluation`` section in our paper)
score = np.mean([acc_db[i][-1] for i in acc_db.keys()])
forget = np.mean([max(acc_db[i])-acc_db[i][-1] for i in range(1, args.tasks)])/100.0
print('average accuracy = {}, forget = {}'.format(score, forget))
print()
print('------------------- Experiment ended -----------------')
def log_metrics(metrics, time, task_id, acc_db, loss_db):
"""
Log accuracy and loss at different times of training
"""
print('epoch {}, task:{}, metrics: {}'.format(time, task_id, metrics))
# log to db
acc = metrics['accuracy']
loss = metrics['loss']
loss_db[task_id][time-1] = loss
acc_db[task_id][time-1] = acc
return acc_db, loss_db
def save_eigenvec(filename, arr):
"""
Save eigenvectors to file
"""
np.save(filename, arr)
def log_hessian(model, loader, time, task_id, hessian_eig_db):
"""
Compute and log Hessian for a specific task
:param model: The PyTorch Model
:param loader: Dataloader [to calculate loss and then Hessian]
:param time: time is a discrete concept regarding epoch. If we have T tasks each with E epoch,
time will be from 0, to (T x E)-1. E.g., if we have 5 tasks with 5 epochs each, then when we finish
task 1, time will be 5.
	:param task_id: Task id (to distinguish between Hessians of different tasks)
:param hessian_eig_db: (The dictionary to store hessians)
:return:
"""
criterion = torch.nn.CrossEntropyLoss().to(DEVICE)
use_gpu = True if DEVICE != 'cpu' else False
est_eigenvals, est_eigenvecs = compute_hessian_eigenthings(
model,
loader,
criterion,
num_eigenthings=3,
power_iter_steps=18,
power_iter_err_threshold=1e-5,
momentum=0,
use_gpu=use_gpu,
)
key = 'task-{}-epoch-{}'.format(task_id, time-1)
hessian_eig_db[key] = est_eigenvals
save_eigenvec(EXPERIMENT_DIRECTORY+"/{}-vec.npy".format(key), est_eigenvecs)
return hessian_eig_db
def save_checkpoint(model, time):
"""
	Save checkpoints of model parameters
:param model: pytorch model
:param time: int
"""
filename = '{directory}/model-{trial}-{time}.pth'.format(directory=EXPERIMENT_DIRECTORY, trial=TRIAL_ID, time=time)
torch.save(model.cpu().state_dict(), filename)
def visualize_result(df, filename):
ax = sns.lineplot(data=df, dashes=False)
ax.figure.savefig(filename, dpi=250)
plt.close()
| 5,329 | 35.758621 | 122 | py |
stable-continual-learning | stable-continual-learning-master/stable_sgd/data_utils.py | import numpy as np
import torch
import torchvision
from torch.utils.data import TensorDataset, DataLoader
import torchvision.transforms.functional as TorchVisionFunc
def get_permuted_mnist(task_id, batch_size):
"""
Get the dataset loaders (train and test) for a `single` task of permuted MNIST.
This function will be called several times for each task.
:param task_id: id of the task [starts from 1]
:param batch_size:
:return: a tuple: (train loader, test loader)
"""
# convention, the first task will be the original MNIST images, and hence no permutation
if task_id == 1:
idx_permute = np.array(range(784))
else:
idx_permute = torch.from_numpy(np.random.RandomState().permutation(784))
transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),
torchvision.transforms.Lambda(lambda x: x.view(-1)[idx_permute] ),
])
mnist_train = torchvision.datasets.MNIST('./data/', train=True, download=True, transform=transforms)
train_loader = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, num_workers=4, pin_memory=True, shuffle=True)
test_loader = torch.utils.data.DataLoader(torchvision.datasets.MNIST('./data/', train=False, download=True, transform=transforms), batch_size=256, shuffle=False, num_workers=4, pin_memory=True)
return train_loader, test_loader
def get_permuted_mnist_tasks(num_tasks, batch_size):
"""
Returns the datasets for sequential tasks of permuted MNIST
:param num_tasks: number of tasks.
:param batch_size: batch-size for loaders.
:return: a dictionary where each key is a dictionary itself with train, and test loaders.
"""
datasets = {}
for task_id in range(1, num_tasks+1):
train_loader, test_loader = get_permuted_mnist(task_id, batch_size)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
return datasets
class RotationTransform:
"""
Rotation transforms for the images in `Rotation MNIST` dataset.
"""
def __init__(self, angle):
self.angle = angle
def __call__(self, x):
return TorchVisionFunc.rotate(x, self.angle, fill=(0,))
def get_rotated_mnist(task_id, batch_size):
"""
	Returns the train and test loaders for a single task of the Rotated MNIST benchmark
:param task_id:
:param batch_size:
:return:
"""
per_task_rotation = 10
rotation_degree = (task_id - 1)*per_task_rotation
rotation_degree -= (np.random.random()*per_task_rotation)
transforms = torchvision.transforms.Compose([
RotationTransform(rotation_degree),
torchvision.transforms.ToTensor(),
])
train_loader = torch.utils.data.DataLoader(torchvision.datasets.MNIST('./data/', train=True, download=True, transform=transforms), batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
test_loader = torch.utils.data.DataLoader(torchvision.datasets.MNIST('./data/', train=False, download=True, transform=transforms), batch_size=256, shuffle=False, num_workers=4, pin_memory=True)
return train_loader, test_loader
def get_rotated_mnist_tasks(num_tasks, batch_size):
"""
Returns data loaders for all tasks of rotation MNIST dataset.
:param num_tasks: number of tasks in the benchmark.
:param batch_size:
:return:
"""
datasets = {}
for task_id in range(1, num_tasks+1):
train_loader, test_loader = get_rotated_mnist(task_id, batch_size)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
return datasets
def get_split_cifar100(task_id, batch_size, cifar_train, cifar_test):
"""
Returns a single task of split CIFAR-100 dataset
:param task_id:
:param batch_size:
:return:
"""
start_class = (task_id-1)*5
end_class = task_id * 5
targets_train = torch.tensor(cifar_train.targets)
target_train_idx = ((targets_train >= start_class) & (targets_train < end_class))
targets_test = torch.tensor(cifar_test.targets)
target_test_idx = ((targets_test >= start_class) & (targets_test < end_class))
train_loader = torch.utils.data.DataLoader(torch.utils.data.dataset.Subset(cifar_train, np.where(target_train_idx==1)[0]), batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.dataset.Subset(cifar_test, np.where(target_test_idx==1)[0]), batch_size=batch_size)
return train_loader, test_loader
def get_split_cifar100_tasks(num_tasks, batch_size):
"""
Returns data loaders for all tasks of split CIFAR-100
:param num_tasks:
:param batch_size:
:return:
"""
datasets = {}
# convention: tasks starts from 1 not 0 !
# task_id = 1 (i.e., first task) => start_class = 0, end_class = 4
cifar_transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),])
cifar_train = torchvision.datasets.CIFAR100('./data/', train=True, download=True, transform=cifar_transforms)
cifar_test = torchvision.datasets.CIFAR100('./data/', train=False, download=True, transform=cifar_transforms)
for task_id in range(1, num_tasks+1):
train_loader, test_loader = get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
return datasets
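# --- Hedged usage sketch (not part of the original file) ---
# Each get_*_tasks helper returns {task_id: {'train': loader, 'test': loader}} with task ids
# starting at 1. The task count and batch size below are placeholders; MNIST is downloaded
# on first use.
def _example_iterate_tasks(num_tasks=3, batch_size=64):
	tasks = get_rotated_mnist_tasks(num_tasks, batch_size)
	for task_id in range(1, num_tasks + 1):
		images, labels = next(iter(tasks[task_id]['train']))
		print(task_id, images.shape, labels.shape)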
# if __name__ == "__main__":
# dataset = get_split_cifar100(1)
| 5,087 | 34.333333 | 200 | py |
stable-continual-learning | stable-continual-learning-master/stable_sgd/models.py | import torch
import torch.nn as nn
from torch.nn.functional import relu, avg_pool2d
class MLP(nn.Module):
"""
Two layer MLP for MNIST benchmarks.
"""
def __init__(self, hiddens, config):
super(MLP, self).__init__()
self.W1 = nn.Linear(784, hiddens)
self.relu = nn.ReLU(inplace=True)
self.dropout_1 = nn.Dropout(p=config['dropout'])
self.W2 = nn.Linear(hiddens, hiddens)
self.dropout_2 = nn.Dropout(p=config['dropout'])
self.W3 = nn.Linear(hiddens, 10)
def forward(self, x, task_id=None):
x = x.view(-1, 784)
out = self.W1(x)
out = self.relu(out)
out = self.dropout_1(out)
out = self.W2(out)
out = self.relu(out)
out = self.dropout_2(out)
out = self.W3(out)
return out
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, config={}):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.conv2 = conv3x3(planes, planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
stride=stride, bias=False),
)
self.IC1 = nn.Sequential(
nn.BatchNorm2d(planes),
nn.Dropout(p=config['dropout'])
)
self.IC2 = nn.Sequential(
nn.BatchNorm2d(planes),
nn.Dropout(p=config['dropout'])
)
def forward(self, x):
out = self.conv1(x)
out = relu(out)
out = self.IC1(out)
out += self.shortcut(x)
out = relu(out)
out = self.IC2(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes, nf, config={}):
super(ResNet, self).__init__()
self.in_planes = nf
self.conv1 = conv3x3(3, nf * 1)
self.bn1 = nn.BatchNorm2d(nf * 1)
self.layer1 = self._make_layer(block, nf * 1, num_blocks[0], stride=1, config=config)
self.layer2 = self._make_layer(block, nf * 2, num_blocks[1], stride=2, config=config)
self.layer3 = self._make_layer(block, nf * 4, num_blocks[2], stride=2, config=config)
self.layer4 = self._make_layer(block, nf * 8, num_blocks[3], stride=2, config=config)
self.linear = nn.Linear(nf * 8 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride, config):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride, config=config))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x, task_id):
bsz = x.size(0)
out = relu(self.bn1(self.conv1(x.view(bsz, 3, 32, 32))))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
t = task_id
offset1 = int((t-1) * 5)
offset2 = int(t * 5)
if offset1 > 0:
out[:, :offset1].data.fill_(-10e10)
if offset2 < 100:
out[:, offset2:100].data.fill_(-10e10)
return out
def ResNet18(nclasses=100, nf=20, config={}):
net = ResNet(BasicBlock, [2, 2, 2, 2], nclasses, nf, config=config)
return net
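# --- Hedged usage sketch (not part of the original file) ---
# ResNet18 above is "multi-head" via output masking: for task t only the 5 logits in columns
# [(t-1)*5, t*5) stay finite, the rest are filled with -10e10 in forward(). The shapes below
# are the split CIFAR-100 defaults used elsewhere in this repository.
def _example_multihead_forward():
	net = ResNet18(nclasses=100, nf=20, config={'dropout': 0.0})
	x = torch.randn(4, 3, 32, 32)
	out = net(x, task_id=2)      # only columns 5..9 remain active
	print(out.argmax(dim=1))     # predictions are restricted to task 2's classes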
| 3,240 | 27.182609 | 87 | py |
stable-continual-learning | stable-continual-learning-master/stable_sgd/__init__.py | 0 | 0 | 0 | py |
|
stable-continual-learning | stable-continual-learning-master/external_libs/__init__.py | 0 | 0 | 0 | py |
|
stable-continual-learning | stable-continual-learning-master/external_libs/hessian_eigenthings/lanczos.py | """ Use scipy/ARPACK implicitly restarted lanczos to find top k eigenthings """
import numpy as np
import torch
from scipy.sparse.linalg import LinearOperator as ScipyLinearOperator
from scipy.sparse.linalg import eigsh
from warnings import warn
def lanczos(
operator,
num_eigenthings=10,
which="LM",
max_steps=20,
tol=1e-6,
num_lanczos_vectors=None,
init_vec=None,
use_gpu=False,
):
"""
Use the scipy.sparse.linalg.eigsh hook to the ARPACK lanczos algorithm
to find the top k eigenvalues/eigenvectors.
Parameters
-------------
operator: power_iter.Operator
linear operator to solve.
num_eigenthings : int
number of eigenvalue/eigenvector pairs to compute
    which : str ['LM', 'SM', 'LA', 'SA']
        L,S = largest, smallest. M, A = in magnitude, algebraic
SM = smallest in magnitude. LA = largest algebraic.
max_steps : int
maximum number of arnoldi updates
tol : float
relative accuracy of eigenvalues / stopping criterion
num_lanczos_vectors : int
number of lanczos vectors to compute. if None, > 2*num_eigenthings
init_vec: [torch.Tensor, torch.cuda.Tensor]
if None, use random tensor. this is the init vec for arnoldi updates.
use_gpu: bool
if true, use cuda tensors.
Returns
----------------
eigenvalues : np.ndarray
array containing `num_eigenthings` eigenvalues of the operator
eigenvectors : np.ndarray
array containing `num_eigenthings` eigenvectors of the operator
"""
if isinstance(operator.size, int):
size = operator.size
else:
size = operator.size[0]
shape = (size, size)
if num_lanczos_vectors is None:
num_lanczos_vectors = min(2 * num_eigenthings, size - 1)
if num_lanczos_vectors < 2 * num_eigenthings:
warn(
"[lanczos] number of lanczos vectors should usually be > 2*num_eigenthings"
)
def _scipy_apply(x):
x = torch.from_numpy(x)
if use_gpu:
x = x.cuda()
return operator.apply(x.float()).cpu().numpy()
scipy_op = ScipyLinearOperator(shape, _scipy_apply)
if init_vec is None:
init_vec = np.random.rand(size)
elif isinstance(init_vec, torch.Tensor):
init_vec = init_vec.cpu().numpy()
eigenvals, eigenvecs = eigsh(
A=scipy_op,
k=num_eigenthings,
which=which,
maxiter=max_steps,
tol=tol,
ncv=num_lanczos_vectors,
return_eigenvectors=True,
)
return eigenvals, eigenvecs.T
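# --- Hedged usage sketch (not part of the original file) ---
# `operator` only needs a `.size` attribute and an `.apply(vec)` method returning a torch
# tensor (the interface of power_iter.Operator). The small symmetric matrix, the extra
# lanczos vectors, and the iteration budget below are placeholders chosen for convergence.
def _example_lanczos():
    class _MatrixOperator:
        def __init__(self, matrix):
            self.matrix = matrix
            self.size = matrix.shape[0]
        def apply(self, vec):
            return self.matrix @ vec
    a = torch.randn(50, 50)
    op = _MatrixOperator((a + a.t()) / 2)  # symmetric, as eigsh assumes
    eigenvals, eigenvecs = lanczos(op, num_eigenthings=3, num_lanczos_vectors=20,
                                   max_steps=100, use_gpu=False)
    print(eigenvals.shape, eigenvecs.shape)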
| 2,585 | 29.785714 | 87 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/hessian_eigenthings/utils.py | """ small helpers """
import shutil
import sys
import time
TOTAL_BAR_LENGTH = 65.0
term_width = shutil.get_terminal_size().columns
def log(msg):
# TODO make this an actual logger lol
print("[hessian_eigenthings] " + str(msg))
last_time = time.time()
begin_time = last_time
def format_time(seconds):
""" converts seconds into day-hour-minute-second-ms string format """
days = int(seconds / 3600 / 24)
seconds = seconds - days * 3600 * 24
hours = int(seconds / 3600)
seconds = seconds - hours * 3600
minutes = int(seconds / 60)
seconds = seconds - minutes * 60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds * 1000)
f = ""
i = 1
if days > 0:
f += str(days) + "D"
i += 1
if hours > 0 and i <= 2:
f += str(hours) + "h"
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + "m"
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + "s"
i += 1
if millis > 0 and i <= 2:
f += str(millis) + "ms"
i += 1
if f == "":
f = "0ms"
return f
def progress_bar(current, total, msg=None):
""" handy utility to display an updating progress bar...
percentage completed is computed as current/total
from: https://github.com/noahgolmant/skeletor/blob/master/skeletor/utils.py
"""
global last_time, begin_time # pylint: disable=global-statement
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH * current / total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(" [")
for _ in range(cur_len):
sys.stdout.write("=")
sys.stdout.write(">")
for _ in range(rest_len):
sys.stdout.write(".")
sys.stdout.write("]")
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(" Step: %s" % format_time(step_time))
L.append(" | Tot: %s" % format_time(tot_time))
if msg:
L.append(" | " + msg)
msg = "".join(L)
sys.stdout.write(msg)
for _ in range(term_width - int(TOTAL_BAR_LENGTH) - len(msg) - 3):
sys.stdout.write(" ")
# Go back to the center of the bar.
for _ in range(term_width - int(TOTAL_BAR_LENGTH / 2) + 2):
sys.stdout.write("\b")
sys.stdout.write(" %d/%d " % (current + 1, total))
if current < total - 1:
sys.stdout.write("\r")
else:
sys.stdout.write("\n")
sys.stdout.flush()
| 2,585 | 24.60396 | 79 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/hessian_eigenthings/hvp_operator.py | """
This module defines a linear operator to compute the hessian-vector product
for a given pytorch model using subsampled data.
"""
import torch
from .power_iter import Operator, deflated_power_iteration
from .lanczos import lanczos
class HVPOperator(Operator):
"""
Use PyTorch autograd for Hessian Vec product calculation
model: PyTorch network to compute hessian for
dataloader: pytorch dataloader that we get examples from to compute grads
loss: Loss function to descend (e.g. F.cross_entropy)
use_gpu: use cuda or not
max_samples: max number of examples per batch using all GPUs.
"""
def __init__(
self,
model,
dataloader,
criterion,
use_gpu=True,
full_dataset=True,
max_samples=256,
):
size = int(sum(p.numel() for p in model.parameters()))
super(HVPOperator, self).__init__(size)
self.grad_vec = torch.zeros(size)
self.model = model
if use_gpu:
self.model = self.model.cuda()
self.dataloader = dataloader
# Make a copy since we will go over it a bunch
self.dataloader_iter = iter(dataloader)
self.criterion = criterion
self.use_gpu = use_gpu
self.full_dataset = full_dataset
self.max_samples = max_samples
def apply(self, vec):
"""
Returns H*vec where H is the hessian of the loss w.r.t.
the vectorized model parameters
"""
if self.full_dataset:
return self._apply_full(vec)
else:
return self._apply_batch(vec)
def _apply_batch(self, vec):
# compute original gradient, tracking computation graph
self.zero_grad()
grad_vec = self.prepare_grad()
self.zero_grad()
# take the second gradient
grad_grad = torch.autograd.grad(
grad_vec, self.model.parameters(), grad_outputs=vec, only_inputs=True
)
# concatenate the results over the different components of the network
hessian_vec_prod = torch.cat([g.contiguous().view(-1) for g in grad_grad])
return hessian_vec_prod
def _apply_full(self, vec):
n = len(self.dataloader)
hessian_vec_prod = None
for _ in range(n):
if hessian_vec_prod is not None:
hessian_vec_prod += self._apply_batch(vec)
else:
hessian_vec_prod = self._apply_batch(vec)
hessian_vec_prod = hessian_vec_prod / n
return hessian_vec_prod
def zero_grad(self):
"""
Zeros out the gradient info for each parameter in the model
"""
for p in self.model.parameters():
if p.grad is not None:
p.grad.data.zero_()
def prepare_grad(self):
"""
Compute gradient w.r.t loss over all parameters and vectorize
"""
try:
all_inputs, all_targets = next(self.dataloader_iter)
except StopIteration:
self.dataloader_iter = iter(self.dataloader)
all_inputs, all_targets = next(self.dataloader_iter)
num_chunks = max(1, len(all_inputs) // self.max_samples)
grad_vec = None
input_chunks = all_inputs.chunk(num_chunks)
target_chunks = all_targets.chunk(num_chunks)
for input, target in zip(input_chunks, target_chunks):
if self.use_gpu:
input = input.cuda()
target = target.cuda()
output = self.model(input)
loss = self.criterion(output, target)
grad_dict = torch.autograd.grad(
loss, self.model.parameters(), create_graph=True
)
if grad_vec is not None:
grad_vec += torch.cat([g.contiguous().view(-1) for g in grad_dict])
else:
grad_vec = torch.cat([g.contiguous().view(-1) for g in grad_dict])
grad_vec /= num_chunks
self.grad_vec = grad_vec
return self.grad_vec
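# --- Illustrative sketch (not used by the library code above) ----------------
# HVPOperator computes H*vec with two autograd.grad calls: the first builds the
# gradient with create_graph=True, the second differentiates the inner product
# <grad, vec> with respect to the parameters (the double-backward trick). The
# helper below restates that step on a standalone scalar loss; `loss`, `params`
# and `vec` are hypothetical inputs introduced only for this sketch.
def _hvp_sketch(loss, params, vec):
    """Return the Hessian-vector product of a scalar `loss` w.r.t. `params`."""
    grads = torch.autograd.grad(loss, params, create_graph=True)
    flat_grad = torch.cat([g.contiguous().view(-1) for g in grads])
    hvp = torch.autograd.grad(flat_grad, params, grad_outputs=vec, only_inputs=True)
    return torch.cat([h.contiguous().view(-1) for h in hvp])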
def compute_hessian_eigenthings(
model,
dataloader,
loss,
num_eigenthings=10,
full_dataset=True,
mode="power_iter",
use_gpu=True,
max_samples=512,
**kwargs
):
"""
Computes the top `num_eigenthings` eigenvalues and eigenvecs
for the hessian of the given model by using subsampled power iteration
with deflation and the hessian-vector product
Parameters
---------------
model : Module
        pytorch model for this network
dataloader : torch.data.DataLoader
dataloader with x,y pairs for which we compute the loss.
loss : torch.nn.modules.Loss | torch.nn.functional criterion
loss function to differentiate through
num_eigenthings : int
number of eigenvalues/eigenvecs to compute. computed in order of
decreasing eigenvalue magnitude.
full_dataset : boolean
if true, each power iteration call evaluates the gradient over the
whole dataset.
mode : str ['power_iter', 'lanczos']
which backend to use to compute the top eigenvalues.
use_gpu:
        if true, attempt to use cuda for all lin alg computations
max_samples:
the maximum number of samples that can fit on-memory. used
to accumulate gradients for large batches.
**kwargs:
contains additional parameters passed onto lanczos or power_iter.
"""
hvp_operator = HVPOperator(
model,
dataloader,
loss,
use_gpu=use_gpu,
full_dataset=full_dataset,
max_samples=max_samples,
)
eigenvals, eigenvecs = None, None
if mode == "power_iter":
eigenvals, eigenvecs = deflated_power_iteration(
hvp_operator, num_eigenthings, use_gpu=use_gpu, **kwargs
)
elif mode == "lanczos":
eigenvals, eigenvecs = lanczos(
hvp_operator, num_eigenthings, use_gpu=use_gpu, **kwargs
)
else:
raise ValueError("Unsupported mode %s (must be power_iter or lanczos)" % mode)
return eigenvals, eigenvecs
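# --- Minimal usage sketch -----------------------------------------------------
# A hedged, self-contained example of calling compute_hessian_eigenthings on a
# tiny least-squares model with random data; the model, data sizes and loss are
# illustrative assumptions, not part of the library. Runs on CPU, e.g. via
# `python -m hessian_eigenthings.hvp_operator` when the package is importable.
if __name__ == "__main__":
    import torch.nn.functional as F
    from torch.utils.data import DataLoader, TensorDataset
    torch.manual_seed(0)
    toy_model = torch.nn.Linear(10, 1)
    xs, ys = torch.randn(128, 10), torch.randn(128, 1)
    loader = DataLoader(TensorDataset(xs, ys), batch_size=32)
    vals, vecs = compute_hessian_eigenthings(
        toy_model, loader, F.mse_loss, num_eigenthings=2, use_gpu=False, mode="power_iter"
    )
    print("top eigenvalue estimates:", vals)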
| 6,060 | 32.486188 | 86 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/hessian_eigenthings/power_iter.py | """
This module contains functions to perform power iteration with deflation
to compute the top eigenvalues and eigenvectors of a linear operator
"""
import numpy as np
import torch
from .utils import log, progress_bar
class Operator:
"""
maps x -> Lx for a linear operator L
"""
def __init__(self, size):
self.size = size
def apply(self, vec):
"""
Function mapping vec -> L vec where L is a linear operator
"""
raise NotImplementedError
class LambdaOperator(Operator):
"""
Linear operator based on a provided lambda function
"""
def __init__(self, apply_fn, size):
super(LambdaOperator, self).__init__(size)
self.apply_fn = apply_fn
def apply(self, x):
return self.apply_fn(x)
def deflated_power_iteration(
operator,
num_eigenthings=10,
power_iter_steps=20,
power_iter_err_threshold=1e-4,
momentum=0.0,
use_gpu=True,
to_numpy=True,
):
"""
Compute top k eigenvalues by repeatedly subtracting out dyads
operator: linear operator that gives us access to matrix vector product
    num_eigenthings: number of eigenvalues to compute
power_iter_steps: number of steps per run of power iteration
power_iter_err_threshold: early stopping threshold for power iteration
returns: np.ndarray of top eigenvalues, np.ndarray of top eigenvectors
"""
eigenvals = []
eigenvecs = []
current_op = operator
prev_vec = None
def _deflate(x, val, vec):
return val * vec.dot(x) * vec
log("beginning deflated power iteration")
for i in range(num_eigenthings):
log("computing eigenvalue/vector %d of %d" % (i + 1, num_eigenthings))
eigenval, eigenvec = power_iteration(
current_op,
power_iter_steps,
power_iter_err_threshold,
momentum=momentum,
use_gpu=use_gpu,
init_vec=prev_vec,
)
log("eigenvalue %d: %.4f" % (i + 1, eigenval))
def _new_op_fn(x, op=current_op, val=eigenval, vec=eigenvec):
return op.apply(x) - _deflate(x, val, vec)
current_op = LambdaOperator(_new_op_fn, operator.size)
prev_vec = eigenvec
eigenvals.append(eigenval)
eigenvec = eigenvec.cpu()
if to_numpy:
eigenvecs.append(eigenvec.numpy())
else:
eigenvecs.append(eigenvec)
eigenvals = np.array(eigenvals)
eigenvecs = np.array(eigenvecs)
# sort them in descending order
sorted_inds = np.argsort(eigenvals)
eigenvals = eigenvals[sorted_inds][::-1]
eigenvecs = eigenvecs[sorted_inds][::-1]
return eigenvals, eigenvecs
def power_iteration(
operator, steps=20, error_threshold=1e-4, momentum=0.0, use_gpu=True, init_vec=None
):
"""
Compute dominant eigenvalue/eigenvector of a matrix
operator: linear Operator giving us matrix-vector product access
steps: number of update steps to take
returns: (principal eigenvalue, principal eigenvector) pair
"""
vector_size = operator.size # input dimension of operator
if init_vec is None:
vec = torch.rand(vector_size)
else:
vec = init_vec
if use_gpu:
vec = vec.cuda()
prev_lambda = 0.0
prev_vec = torch.randn_like(vec)
for i in range(steps):
prev_vec = vec / (torch.norm(vec) + 1e-6)
new_vec = operator.apply(vec) - momentum * prev_vec
# need to handle case where we end up in the nullspace of the operator.
# in this case, we are done.
if torch.sum(new_vec).item() == 0.0:
return 0.0, new_vec
lambda_estimate = vec.dot(new_vec).item()
diff = lambda_estimate - prev_lambda
vec = new_vec.detach() / torch.norm(new_vec)
if lambda_estimate == 0.0: # for low-rank
error = 1.0
else:
error = np.abs(diff / lambda_estimate)
progress_bar(i, steps, "power iter error: %.4f" % error)
if error < error_threshold:
return lambda_estimate, vec
prev_lambda = lambda_estimate
return lambda_estimate, vec
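# --- Minimal usage sketch -----------------------------------------------------
# Hedged example (not part of the library): wrap an explicit symmetric PSD
# matrix in a LambdaOperator and recover its top eigenvalues with deflated
# power iteration; the matrix size and seed are arbitrary assumptions. Runs on
# CPU, e.g. via `python -m hessian_eigenthings.power_iter`.
if __name__ == "__main__":
    torch.manual_seed(0)
    n = 8
    a = torch.randn(n, n)
    sym = a @ a.t() / n  # symmetric PSD, so the largest eigenvalues are also largest in magnitude
    op = LambdaOperator(lambda v: sym @ v, n)
    vals, vecs = deflated_power_iteration(op, num_eigenthings=3, use_gpu=False)
    print("power-iteration estimates:", vals)
    print("numpy reference          :", np.linalg.eigvalsh(sym.numpy())[::-1][:3])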
| 4,147 | 28.841727 | 87 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/hessian_eigenthings/__init__.py | """ Top-level module for hessian eigenvec computation
This library is cited in our paper.
"""
from .power_iter import power_iteration, deflated_power_iteration
from .lanczos import lanczos
from .hvp_operator import HVPOperator, compute_hessian_eigenthings
__all__ = [
"power_iteration",
"deflated_power_iteration",
"lanczos",
"HVPOperator",
"compute_hessian_eigenthings",
]
name = "hessian_eigenthings"
| 425 | 24.058824 | 66 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/hessian_eigenthings/spectral_density.py | import numpy as np
import torch
from warnings import warn
from scipy.sparse.linalg import LinearOperator, eigsh
def _lanczos_step(vec, size, current_draw):
pass
def lanczos(
    operator,
    num_eigenthings=10,
    which="LM",
    max_steps=20,
    tol=1e-6,
    num_lanczos_vectors=None,
    init_vec=None,
    use_gpu=False,
):
"""
Use the scipy.sparse.linalg.eigsh hook to the ARPACK lanczos algorithm
to find the top k eigenvalues/eigenvectors.
Parameters
-------------
operator: power_iter.Operator
linear operator to solve.
num_eigenthings : int
number of eigenvalue/eigenvector pairs to compute
    which : str ['LM', 'SM', 'LA', 'SA']
        L,S = largest, smallest. M, A = in magnitude, algebraic
SM = smallest in magnitude. LA = largest algebraic.
max_steps : int
maximum number of arnoldi updates
tol : float
relative accuracy of eigenvalues / stopping criterion
num_lanczos_vectors : int
number of lanczos vectors to compute. if None, > 2*num_eigenthings
init_vec: [torch.Tensor, torch.cuda.Tensor]
if None, use random tensor. this is the init vec for arnoldi updates.
use_gpu: bool
if true, use cuda tensors.
Returns
----------------
eigenvalues : np.ndarray
array containing `num_eigenthings` eigenvalues of the operator
eigenvectors : np.ndarray
array containing `num_eigenthings` eigenvectors of the operator
"""
    if isinstance(operator.size, int):
        size = operator.size
    else:
        size = operator.size[0]
    if num_lanczos_vectors is None:
        num_lanczos_vectors = min(2 * num_eigenthings, size - 1)
    if num_lanczos_vectors < 2 * num_eigenthings:
        warn(
            "[lanczos] number of lanczos vectors should usually be > 2*num_eigenthings"
        )
    # NOTE: the body below is a reconstruction (the original file returned an
    # undefined `eigenvals` without computing anything). Following the docstring,
    # the operator is wrapped as a scipy LinearOperator and handed to ARPACK's eigsh.
    def _scipy_apply(x):
        # ARPACK hands us numpy vectors; route them through the torch operator.
        vec = torch.from_numpy(x).float()
        if use_gpu:
            vec = vec.cuda()
        out = operator.apply(vec)
        return out.detach().cpu().numpy()
    scipy_op = LinearOperator((size, size), matvec=_scipy_apply, dtype=np.float32)
    if init_vec is None:
        init_vec = np.random.rand(size).astype(np.float32)
    elif isinstance(init_vec, torch.Tensor):
        init_vec = init_vec.cpu().numpy()
    eigenvals, eigenvecs = eigsh(
        scipy_op,
        k=num_eigenthings,
        which=which,
        maxiter=max_steps,
        tol=tol,
        ncv=num_lanczos_vectors,
        v0=init_vec,
    )
    return eigenvals, eigenvecs.T
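# --- Minimal usage sketch -----------------------------------------------------
# Hedged example (not part of the library): a tiny stand-in operator exposing
# the `.size` / `.apply` interface expected above, checked against numpy on a
# small symmetric PSD matrix. All names and sizes below are illustrative.
if __name__ == "__main__":
    class _MatOperator:
        def __init__(self, mat):
            self.mat = mat
            self.size = mat.shape[0]
        def apply(self, vec):
            return self.mat @ vec
    torch.manual_seed(0)
    a = torch.randn(12, 12)
    sym = a @ a.t() / 12.0
    vals, vecs = lanczos(_MatOperator(sym), num_eigenthings=3, use_gpu=False, max_steps=100)
    print("lanczos estimates:", np.sort(vals)[::-1])
    print("numpy reference  :", np.linalg.eigvalsh(sym.numpy())[::-1][:3])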
| 1,764 | 27.467742 | 87 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/continual_learning_algorithms/fc_mnist.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for permute MNIST experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import construct_permute_mnist, construct_rotate_mnist
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, update_reservior
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
EPOCHS_PER_TASK = 1
NUM_RUNS = 10 # Number of experiments to average over
TRAIN_ITERS = 5000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 1e-3
RANDOM_SEED = 1235
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_POWER = 0.9
OPT_MOMENTUM = 0.9
VALID_ARCHS = ['FC-S', 'FC-B']
ARCH = 'FC-S'
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'A-GEM', 'S-GEM', 'FTR_EXT', 'PNN', 'ER'] #List of valid models
IMP_METHOD = 'EWC'
SYNAP_STGTH = 10
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 10 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 1 # Number of samples per task
INPUT_FEATURE_SIZE = 784
IMG_HEIGHT = 28
IMG_WIDTH = 28
IMG_CHANNELS = 1
TOTAL_CLASSES = 10 # Total number of classes in the dataset
EPS_MEM_BATCH_SIZE = 64
DEBUG_EPISODIC_MEMORY = False
USE_GPU = True
K_FOR_CROSS_VAL = 3
TIME_MY_METHOD = False
COUNT_VIOLATIONS = False
MEASURE_PERF_ON_EPS_MEMORY = False
## Logging, saving and testing options
LOG_DIR = './permute_mnist_results'
## Evaluation options
## Num Tasks
NUM_TASKS = 23
MULTI_TASK = False
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Script for permutted mnist experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_false",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_false",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH, help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument('--dataset', type=str, default='rot-mnist', help='dataset (benchmark). could be rot-mnist or perm-mnist')
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Number of samples per class from previous tasks.")
parser.add_argument("--decay", type=float, default=0.4,
help="learning rate decay factor (gamma)")
parser.add_argument("--eps-mem-batch", type=int, default=EPS_MEM_BATCH_SIZE,
help="Number of samples per class from previous tasks.")
parser.add_argument("--examples-per-task", type=int, default=50000,
help="Number of examples per task.")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
def train_task_sequence(model, sess, args):
"""
    Train and evaluate LLL system such that we only see an example once
Args:
Returns:
dict A dictionary containing mean and stds for the experiment
"""
# List to store accuracy for each run
runs = []
batch_size = args.batch_size
if model.imp_method == 'A-GEM' or model.imp_method == 'ER':
use_episodic_memory = True
else:
use_episodic_memory = False
# Loop over number of runs to average over
for runid in range(args.num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(args.random_seed+runid)
        # Load the permuted or rotated MNIST dataset, depending on args.dataset
if 'rot' in args.dataset or 'rotation' in args.dataset:
datasets = construct_rotate_mnist(model.num_tasks)
else:
datasets = construct_permute_mnist(model.num_tasks)
print("total datasets => ", len(datasets))
episodic_mem_size = args.mem_size*model.num_tasks*TOTAL_CLASSES
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = np.arange(TOTAL_CLASSES)
if use_episodic_memory:
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, INPUT_FEATURE_SIZE])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
count_cls = np.zeros(TOTAL_CLASSES, dtype=np.int32)
episodic_filled_counter = 0
examples_seen_so_far = 0
# Mask for softmax
        # Since all the classes are present in all the tasks, there is nothing to mask
logit_mask = np.ones(TOTAL_CLASSES)
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
pnn_logit_mask = np.ones([model.num_tasks, TOTAL_CLASSES])
if COUNT_VIOLATIONS:
violation_count = np.zeros(model.num_tasks)
vc = 0
# Training loop for all the tasks
for task in range(len(datasets)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0 and model.imp_method != 'PNN'):
model.restore(sess)
# Extract training images and labels for the current task
task_train_images = datasets[task]['train']['images']
task_train_labels = datasets[task]['train']['labels']
            # If multi_task is set then train using datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(datasets)):
task_train_images = np.concatenate((task_train_images, datasets[t_]['train']['images']), axis=0)
task_train_labels = np.concatenate((task_train_labels, datasets[t_]['train']['labels']), axis=0)
else:
# Skip training for this task
continue
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
total_train_examples = task_train_images.shape[0]
            # Randomly shuffle the training examples
perm = np.arange(total_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm][:args.examples_per_task]
train_y = task_train_labels[perm][:args.examples_per_task]
task_sample_weights = task_sample_weights[perm][:args.examples_per_task]
print('Received {} images, {} labels at task {}'.format(train_x.shape[0], train_y.shape[0], task))
# Array to store accuracies when training for task T
ftask = []
num_train_examples = train_x.shape[0]
# Train a task observing sequence of data
if args.train_single_epoch:
num_iters = num_train_examples // batch_size
else:
num_iters = int(EPOCHS_PER_TASK*(num_train_examples // batch_size))
# Training loop for task T
for iters in range(num_iters):
if args.train_single_epoch and not args.cross_validate_mode:
if (iters < 10) or (iters < 100 and iters % 10 == 0) or (iters % 100 == 0):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets, args.online_cross_val)
ftask.append(fbatch)
offset = (iters * batch_size) % (num_train_examples - batch_size)
residual = batch_size
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_[task]: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: True}
if model.imp_method == 'VAN':
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PNN':
feed_dict[model.task_id] = task
_, loss = sess.run([model.train[task], model.unweighted_entropy[task]], feed_dict=feed_dict)
elif model.imp_method == 'FTR_EXT':
if task == 0:
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_classifier, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC':
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
if episodic_filled_counter <= args.eps_mem_batch:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, args.eps_mem_batch, replace=False) # Sample without replacement so that we don't sample an example more than once
# Store the reference gradient
sess.run(model.store_ref_grads, feed_dict={model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.keep_prob: 1.0, model.output_mask: logit_mask, model.train_phase: True})
if COUNT_VIOLATIONS:
vc, _, loss = sess.run([model.violation_count, model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
else:
# Compute the gradient for current task and project if need be
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
# Put the batch in the ring buffer
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
cls = np.unique(np.nonzero(er_y_))[-1]
# Write the example at the location pointed by count_cls[cls]
cls_to_index_map = cls
with_in_task_offset = args.mem_size * cls_to_index_map
mem_index = count_cls[cls] + with_in_task_offset + episodic_filled_counter
episodic_images[mem_index] = er_x
episodic_labels[mem_index] = er_y_
count_cls[cls] = (count_cls[cls] + 1) % args.mem_size
elif model.imp_method == 'RWALK':
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'ER':
mem_filled_so_far = examples_seen_so_far if (examples_seen_so_far < episodic_mem_size) else episodic_mem_size
if mem_filled_so_far < args.eps_mem_batch:
er_mem_indices = np.arange(mem_filled_so_far)
else:
er_mem_indices = np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
                    # Train on a batch that concatenates episodic memory samples with the current batch
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0)
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: True}
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
update_reservior(er_x, er_y_, episodic_images, episodic_labels, episodic_mem_size, examples_seen_so_far)
examples_seen_so_far += 1
if (iters % 1000 == 0):
print("get global step")
tf.print(model.global_step, output_stream=sys.stderr)
tf.print(model.learning_rate, output_stream=sys.stderr)
lr = sess.run([model.learning_rate], feed_dict=feed_dict)
print(lr)
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
                    print('ERROR: NaNs NaNs NaNs!!!')
sys.exit(0)
print('\t\t\t\tTraining for Task%d done!'%(task))
            # Update the episodic memory filled counter
if use_episodic_memory:
episodic_filled_counter += args.mem_size * TOTAL_CLASSES
if model.imp_method == 'A-GEM' and COUNT_VIOLATIONS:
violation_count[task] = vc
print('Task {}: Violation Count: {}'.format(task, violation_count))
sess.run(model.reset_violation_count, feed_dict=feed_dict)
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if (task < (len(datasets) - 1)) or MEASURE_PERF_ON_EPS_MEMORY:
model.task_updates(sess, task, task_train_images, np.arange(TOTAL_CLASSES))
print('\t\t\t\tTask updates after Task%d done!'%(task))
if args.train_single_epoch and not args.cross_validate_mode:
fbatch = test_task_sequence(model, sess, datasets, False)
ftask.append(fbatch)
ftask = np.array(ftask)
else:
if MEASURE_PERF_ON_EPS_MEMORY:
eps_mem = {
'images': episodic_images,
'labels': episodic_labels,
}
# Measure perf on episodic memory
ftask = test_task_sequence(model, sess, eps_mem, args.online_cross_val)
else:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets, args.online_cross_val)
# Store the accuracies computed at task T in a list
evals.append(ftask)
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
runs.append(np.array(evals))
# End for loop runid
runs = np.array(runs)
return runs
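# --- Illustrative sketch (not called by the training code) --------------------
# The A-GEM branch above writes each incoming example into a per-class ring
# buffer: every class owns a contiguous block of `mem_size` slots inside the
# region reserved for the current task, and count_cls[cls] cycles within that
# block. The helper below restates that indexing on its own, with hypothetical
# numpy buffers, purely to make the index arithmetic explicit.
def _ring_buffer_write_sketch(er_x, er_y_, episodic_images, episodic_labels,
                              count_cls, mem_size, episodic_filled_counter):
    cls = np.unique(np.nonzero(er_y_))[-1]
    within_task_offset = mem_size * cls                 # block owned by this class
    mem_index = count_cls[cls] + within_task_offset + episodic_filled_counter
    episodic_images[mem_index] = er_x
    episodic_labels[mem_index] = er_y_
    count_cls[cls] = (count_cls[cls] + 1) % mem_size    # wrap around inside the block
    return mem_index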
def test_task_sequence(model, sess, test_data, cross_validate_mode):
"""
Snapshot the current performance
"""
if TIME_MY_METHOD:
# Only compute the training time
return np.zeros(model.num_tasks)
list_acc = []
if model.imp_method == 'PNN':
pnn_logit_mask = np.ones([model.num_tasks, TOTAL_CLASSES])
else:
logit_mask = np.ones(TOTAL_CLASSES)
if MEASURE_PERF_ON_EPS_MEMORY:
for task in range(model.num_tasks):
mem_offset = task*SAMPLES_PER_CLASS*TOTAL_CLASSES
feed_dict = {model.x: test_data['images'][mem_offset:mem_offset+SAMPLES_PER_CLASS*TOTAL_CLASSES],
model.y_: test_data['labels'][mem_offset:mem_offset+SAMPLES_PER_CLASS*TOTAL_CLASSES], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False}
acc = model.accuracy.eval(feed_dict = feed_dict)
list_acc.append(acc)
print(list_acc)
return list_acc
for task, _ in enumerate(test_data):
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
feed_dict = {model.x: test_data[task]['test']['images'],
model.y_[task]: test_data[task]['test']['labels'], model.keep_prob: 1.0}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
acc = model.accuracy[task].eval(feed_dict = feed_dict)
else:
feed_dict = {model.x: test_data[task]['test']['images'],
model.y_: test_data[task]['test']['labels'], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False}
acc = model.accuracy.eval(feed_dict = feed_dict)
list_acc.append(acc)
print("accuracy_list => ", list_acc)
return list_acc
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
#
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'DATASET': 'PERMUTE_MNIST',
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': args.imp_method,
'SYNAP_STGTH': args.synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': args.learning_rate,
'BATCH_SIZE': args.batch_size,
'MEM_SIZE': args.mem_size}
experiment_id = "PERMUTE_MNIST_HERDING_%s_%s_%s_%s_%r_%s-"%(args.arch, args.train_single_epoch, args.imp_method, str(args.synap_stgth).replace('.', '_'),
str(args.batch_size), str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Get the subset of data depending on training or cross-validation mode
args.online_cross_val = False
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS #- K_FOR_CROSS_VAL
# Variables to store the accuracies and standard deviations of the experiment
acc_mean = dict()
acc_std = dict()
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(args.random_seed)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, INPUT_FEATURE_SIZE])
#x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
if args.imp_method == 'PNN':
y_ = []
for i in range(num_tasks):
y_.append(tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES]))
else:
y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
elif args.optim == 'MOMENTUM':
            # NOTE: the polynomial-decay expression originally written here referenced
            # `train_step`/`training_iters`, which are not defined in this scope and
            # would raise a NameError; the momentum optimizer below therefore uses the
            # constant learning rate directly.
opt = tf.train.MomentumOptimizer(args.learning_rate, OPT_MOMENTUM)
        # Create the Model/ construct the graph
model = Model(x, y_, num_tasks, opt, args.imp_method, args.synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, all_args=args)
# Set up tf session and initialize variables.
if USE_GPU:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
else:
config = tf.ConfigProto(
device_count = {'GPU': 0}
)
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
runs = train_task_sequence(model, sess, args)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
# If cross-validation flag is enabled, store the stuff in a text file
if args.cross_validate_mode:
acc_mean = runs.mean(0)
acc_std = runs.std(0)
cross_validate_dump_file = args.log_dir + '/' + 'PERMUTE_MNIST_%s_%s'%(args.imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
if MULTI_TASK:
f.write('GPU:{} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(USE_GPU, args.arch, args.learning_rate,
args.synap_stgth, acc_mean[-1, :].mean()))
else:
f.write('GPU: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {} \t Fgt: {} \t Time: {}\n'.format(USE_GPU, args.arch, args.learning_rate,
args.synap_stgth, acc_mean[-1, :].mean(), compute_fgt(acc_mean), str(time_spent)))
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
if __name__ == '__main__':
main()
| 28,906 | 47.501678 | 201 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/continual_learning_algorithms/extract_res.py | from six.moves import cPickle as pickle
with open('./ring.pickle', 'rb') as f:
data = pickle.load(f)['mean']
print(data.shape)
print(data[0][-1][-1])
| 153 | 18.25 | 39 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/continual_learning_algorithms/conv_split_cifar.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split CIFAR 100 experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import construct_split_cifar
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data
from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 64
LEARNING_RATE = 0.1
RANDOM_SEED = 3235
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'RESNET-S', 'RESNET-B', 'VGG']
ARCH = 'RESNET-S'
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'M-EWC', 'S-GEM', 'A-GEM', 'FTR_EXT', 'PNN', 'ER'] #List of valid models
IMP_METHOD = 'EWC'
SYNAP_STGTH = 10
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 1
IMG_HEIGHT = 32
IMG_WIDTH = 32
IMG_CHANNELS = 3
TOTAL_CLASSES = 100 # Total number of classes in the dataset
VISUALIZE_IMPORTANCE_MEASURE = False
MEASURE_CONVERGENCE_AFTER = 0.9
EPS_MEM_BATCH_SIZE = 256
DEBUG_EPISODIC_MEMORY = False
K_FOR_CROSS_VAL = 3
TIME_MY_METHOD = False
COUNT_VIOLATONS = False
MEASURE_PERF_ON_EPS_MEMORY = False
## Logging, saving and testing options
LOG_DIR = './split_cifar_results'
RESNET18_CIFAR10_CHECKPOINT = './resnet-18-pretrained-cifar10/model.ckpt-19999'
## Evaluation options
## Task split
NUM_TASKS = 23
MULTI_TASK = False
# Define function to load/ store training weights. We will use ImageNet initialization later on
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Script for split cifar experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Total size of episodic memory.")
parser.add_argument("--eps-mem-batch", type=int, default=EPS_MEM_BATCH_SIZE,
help="Number of samples per class from previous tasks.")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
def train_task_sequence(model, sess, datasets, args):
"""
    Train and evaluate LLL system such that we only see an example once
Args:
Returns:
dict A dictionary containing mean and stds for the experiment
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
if model.imp_method == 'A-GEM' or model.imp_method == 'ER':
use_episodic_memory = True
else:
use_episodic_memory = False
batch_size = args.batch_size
# Loop over number of runs to average over
for runid in range(args.num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(args.random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
classes_per_task = TOTAL_CLASSES// NUM_TASKS
total_classes = classes_per_task * model.num_tasks
if args.online_cross_val:
label_array = np.arange(total_classes)
else:
class_label_offset = K_FOR_CROSS_VAL * classes_per_task
label_array = np.arange(class_label_offset, total_classes+class_label_offset)
np.random.shuffle(label_array)
for tt in range(model.num_tasks):
tt_offset = tt*classes_per_task
task_labels.append(list(label_array[tt_offset:tt_offset+classes_per_task]))
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
# Store the task labels
task_labels_dataset.append(task_labels)
# Set episodic memory size
episodic_mem_size = args.mem_size * total_classes
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = []
if use_episodic_memory:
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
episodic_filled_counter = 0
nd_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
count_cls = np.zeros(TOTAL_CLASSES, dtype=np.int32)
episodic_filled_counter = 0
examples_seen_so_far = 0
# Mask for softmax
logit_mask = np.zeros(TOTAL_CLASSES)
if COUNT_VIOLATONS:
violation_count = np.zeros(model.num_tasks)
vc = 0
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0 and model.imp_method != 'PNN'):
model.restore(sess)
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
pnn_train_phase[task] = True
pnn_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
# If not in the cross validation mode then concatenate the train and validation sets
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
task_val_images, task_val_labels = load_task_specific_data(datasets[0]['validation'], task_labels[task])
task_train_images, task_train_labels = concatenate_datasets(task_tr_images, task_tr_labels, task_val_images, task_val_labels)
# If multi_task is set then train using all the datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(task_labels)):
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[t_])
task_train_images = np.concatenate((task_train_images, task_tr_images), axis=0)
task_train_labels = np.concatenate((task_train_labels, task_tr_labels), axis=0)
else:
# Skip training for this task
continue
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
print('Unique labels in the task: {}'.format(np.unique(np.nonzero(task_train_labels)[1])))
# Test for the tasks that we've seen so far
test_labels += task_labels[task]
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
logit_mask[:] = 0
# Train a task observing sequence of data
if args.train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
if args.cross_validate_mode:
logit_mask[task_labels[task]] = 1.0
else:
num_iters = args.train_iters
# Set the mask only once before starting the training for the task
logit_mask[task_labels[task]] = 1.0
if MULTI_TASK:
logit_mask[:] = 1.0
            # Randomly shuffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
ftask = []
# Number of iterations after which convergence is checked
convergence_iters = int(num_iters * MEASURE_CONVERGENCE_AFTER)
# Training loop for task T
for iters in range(num_iters):
if args.train_single_epoch and not args.cross_validate_mode and not MULTI_TASK:
if (iters <= 20) or (iters > 20 and iters % 50 == 0):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
ftask.append(fbatch)
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
pnn_logit_mask[:] = 0
pnn_logit_mask[task][task_labels[task]] = 1.0
elif model.imp_method == 'A-GEM':
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
else:
# Set the output labels over which the model needs to be trained
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
if args.train_single_epoch:
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
if model.imp_method == 'PNN':
feed_dict = {model.x: train_x[offset:offset+residual], model.y_[task]: train_y[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
else:
offset = (iters * batch_size) % (num_train_examples - batch_size)
if model.imp_method == 'PNN':
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_[task]: train_y[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PNN':
_, loss = sess.run([model.train[task], model.unweighted_entropy[task]], feed_dict=feed_dict)
elif model.imp_method == 'FTR_EXT':
feed_dict[model.output_mask] = logit_mask
if task == 0:
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_classifier, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC' or model.imp_method == 'M-EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
if (iters >= convergence_iters) and (model.imp_method == 'M-EWC'):
_, _, _, _, loss = sess.run([model.weights_old_ops_grouped, model.set_tmp_fisher, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
else:
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the mask for all the previous tasks so far
nd_logit_mask[:] = 0
for tt in range(task):
nd_logit_mask[tt][task_labels[tt]] = 1.0
if episodic_filled_counter <= args.eps_mem_batch:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, args.eps_mem_batch, replace=False) # Sample without replacement so that we don't sample an example more than once
# Store the reference gradient
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.keep_prob: 1.0, model.train_phase: True}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
# Compute the gradient for current task and project if need be
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
if COUNT_VIOLATONS:
vc, _, loss = sess.run([model.violation_count, model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
# Put the batch in the ring buffer
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
cls = np.unique(np.nonzero(er_y_))[-1]
# Write the example at the location pointed by count_cls[cls]
cls_to_index_map = np.where(np.array(task_labels[task]) == cls)[0][0]
with_in_task_offset = args.mem_size * cls_to_index_map
mem_index = count_cls[cls] + with_in_task_offset + episodic_filled_counter
episodic_images[mem_index] = er_x
episodic_labels[mem_index] = er_y_
count_cls[cls] = (count_cls[cls] + 1) % args.mem_size
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'ER':
mem_filled_so_far = examples_seen_so_far if (examples_seen_so_far < episodic_mem_size) else episodic_mem_size
if mem_filled_so_far < args.eps_mem_batch:
er_mem_indices = np.arange(mem_filled_so_far)
else:
er_mem_indices = np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
nd_logit_mask[:] = 0
for tt in range(task+1):
nd_logit_mask[tt][task_labels[tt]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0)
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = float(er_train_x_batch.shape[0])
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
# Reservoir update
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
update_reservior(er_x, er_y_, episodic_images, episodic_labels, episodic_mem_size, examples_seen_so_far)
examples_seen_so_far += 1
if (iters % 100 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
sys.exit(0)
print('\t\t\t\tTraining for Task%d done!'%(task))
if use_episodic_memory:
episodic_filled_counter += args.mem_size * classes_per_task
if model.imp_method == 'A-GEM':
if COUNT_VIOLATONS:
violation_count[task] = vc
print('Task {}: Violation Count: {}'.format(task, violation_count))
sess.run(model.reset_violation_count, feed_dict=feed_dict)
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if (task < (len(task_labels) - 1)) or MEASURE_PERF_ON_EPS_MEMORY:
model.task_updates(sess, task, task_train_images, task_labels[task]) # TODO: For MAS, should the gradients be for current task or all the previous tasks
print('\t\t\t\tTask updates after Task%d done!'%(task))
if VISUALIZE_IMPORTANCE_MEASURE:
if runid == 0:
for i in range(len(model.fisher_diagonal_at_minima)):
if i == 0:
flatten_fisher = np.array(model.fisher_diagonal_at_minima[i].eval()).flatten()
else:
flatten_fisher = np.concatenate((flatten_fisher,
np.array(model.fisher_diagonal_at_minima[i].eval()).flatten()))
#flatten_fisher [flatten_fisher > 0.1] = 0.1
if args.train_single_epoch:
plot_histogram(flatten_fisher, 100, '/private/home/arslanch/Dropbox/LLL_experiments/Single_Epoch/importance_vis/single_epoch/m_ewc/hist_fisher_task%s.png'%(task))
else:
plot_histogram(flatten_fisher, 100, '/private/home/arslanch/Dropbox/LLL_experiments/Single_Epoch/importance_vis/single_epoch/m_ewc/hist_fisher_task%s.png'%(task))
if args.train_single_epoch and not args.cross_validate_mode:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
print('Task: {}, Acc: {}'.format(task, fbatch))
ftask.append(fbatch)
ftask = np.array(ftask)
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
pnn_logit_mask[:] = 0
pnn_logit_mask[task][task_labels[task]] = 1.0
else:
if MEASURE_PERF_ON_EPS_MEMORY:
eps_mem = {
'images': episodic_images,
'labels': episodic_labels,
}
# Measure perf on episodic memory
ftask = test_task_sequence(model, sess, eps_mem, task_labels, task, classes_per_task=classes_per_task)
else:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
print('Task: {}, Acc: {}'.format(task, ftask))
# Store the accuracies computed at task T in a list
evals.append(ftask)
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
runs.append(np.array(evals))
# End for loop runid
runs = np.array(runs)
return runs, task_labels_dataset
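# --- Illustrative sketch (not called by the training code) --------------------
# The ER branch above calls update_reservior() from utils to keep a uniform
# sample of the stream in the fixed-size episodic memory. The helper below is a
# generic textbook reservoir-sampling (Algorithm R) update, stated here only as
# an assumption: it is consistent with how update_reservior is invoked, but the
# actual utils implementation may differ in detail.
def _reservoir_update_sketch(x, y, buffer_x, buffer_y, buffer_size, examples_seen_so_far):
    if examples_seen_so_far < buffer_size:
        buffer_x[examples_seen_so_far] = x
        buffer_y[examples_seen_so_far] = y
    else:
        j = np.random.randint(0, examples_seen_so_far + 1)
        if j < buffer_size:
            buffer_x[j] = x
            buffer_y[j] = y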
def test_task_sequence(model, sess, test_data, test_tasks, task, classes_per_task=0):
"""
Snapshot the current performance
"""
if TIME_MY_METHOD:
# Only compute the training time
return np.zeros(model.num_tasks)
final_acc = np.zeros(model.num_tasks)
if model.imp_method == 'PNN' or model.imp_method == 'A-GEM' or model.imp_method == 'ER':
logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
else:
logit_mask = np.zeros(TOTAL_CLASSES)
if MEASURE_PERF_ON_EPS_MEMORY:
for tt, labels in enumerate(test_tasks):
# Multi-head evaluation setting
logit_mask[:] = 0
logit_mask[labels] = 1.0
mem_offset = tt*SAMPLES_PER_CLASS*classes_per_task
feed_dict = {model.x: test_data['images'][mem_offset:mem_offset+SAMPLES_PER_CLASS*classes_per_task],
model.y_: test_data['labels'][mem_offset:mem_offset+SAMPLES_PER_CLASS*classes_per_task], model.keep_prob: 1.0, model.train_phase: False, model.output_mask: logit_mask}
acc = model.accuracy.eval(feed_dict = feed_dict)
final_acc[tt] = acc
return final_acc
for tt, labels in enumerate(test_tasks):
if not MULTI_TASK:
if tt > task:
return final_acc
task_test_images, task_test_labels = load_task_specific_data(test_data, labels)
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
logit_mask[:] = 0
logit_mask[tt][labels] = 1.0
feed_dict = {model.x: task_test_images,
model.y_[tt]: task_test_labels, model.keep_prob: 1.0}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
acc = model.accuracy[tt].eval(feed_dict = feed_dict)
elif model.imp_method == 'A-GEM' or model.imp_method == 'ER':
logit_mask[:] = 0
logit_mask[tt][labels] = 1.0
feed_dict = {model.x: task_test_images,
model.y_: task_test_labels, model.keep_prob: 1.0, model.train_phase: False}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
feed_dict.update(logit_mask_dict)
acc = model.accuracy[tt].eval(feed_dict = feed_dict)
else:
logit_mask[:] = 0
logit_mask[labels] = 1.0
feed_dict = {model.x: task_test_images,
model.y_: task_test_labels, model.keep_prob: 1.0, model.train_phase: False, model.output_mask: logit_mask}
acc = model.accuracy.eval(feed_dict = feed_dict)
final_acc[tt] = acc
return final_acc
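# --- Illustrative sketch (not called by the training code) --------------------
# Training and evaluation above both build a per-task logit mask so that only
# the classes of the task under consideration stay active in the softmax head
# (multi-head evaluation). The helper below shows the same idea on plain numpy
# logits; it is a simplified restatement for illustration, not the exact masking
# op the Model graph applies internally via output_mask.
def _masked_prediction_sketch(logits, task_class_labels):
    mask = np.zeros(logits.shape[-1], dtype=np.float32)
    mask[task_class_labels] = 1.0
    masked = np.where(mask > 0, logits, -np.inf)        # inactive classes can never win
    return np.argmax(masked, axis=-1)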
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'ARCH': args.arch,
'DATASET': 'SPLIT_CIFAR',
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': args.imp_method,
'SYNAP_STGTH': args.synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': args.learning_rate,
'BATCH_SIZE': args.batch_size,
'MEM_SIZE': args.mem_size}
experiment_id = "SPLIT_CIFAR_HERDING_%s_%r_%s_%s_%s_%s_%s-"%(args.arch, args.train_single_epoch, args.imp_method,
str(args.synap_stgth).replace('.', '_'), str(args.learning_rate).replace('.', '_'),
str(args.batch_size), str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Get the task labels from the total number of tasks and full label space
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS - K_FOR_CROSS_VAL
# Load the split cifar dataset
data_labs = [np.arange(TOTAL_CLASSES)]
datasets = construct_split_cifar(data_labs)
# Variables to store the accuracies and standard deviations of the experiment
acc_mean = dict()
acc_std = dict()
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(args.random_seed)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
if args.imp_method == 'PNN':
y_ = []
for i in range(num_tasks):
y_.append(tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES]))
else:
y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
        elif args.optim == 'MOMENTUM':
            # Note: the polynomial decay below would need train_step/ training_iters,
            # which are not defined in this scope, so the optimizer uses the constant
            # args.learning_rate instead.
            # base_lr = tf.constant(args.learning_rate)
            # learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - train_step / training_iters), OPT_POWER))
            opt = tf.train.MomentumOptimizer(args.learning_rate, OPT_MOMENTUM)
        # Create the model/ construct the graph
model = Model(x, y_, num_tasks, opt, args.imp_method, args.synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
runs, task_labels_dataset = train_task_sequence(model, sess, datasets, args)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
exper_labels = dict(labels=task_labels_dataset)
# If cross-validation flag is enabled, store the stuff in a text file
if args.cross_validate_mode:
acc_mean, acc_std = average_acc_stats_across_runs(runs, model.imp_method)
fgt_mean, fgt_std = average_fgt_stats_across_runs(runs, model.imp_method)
cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_CIFAR_%s_%s'%(args.imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
if MULTI_TASK:
                f.write('ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(args.arch, args.learning_rate, args.synap_stgth, acc_mean[-1,:].mean()))
else:
f.write('ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {} \t Fgt: {} \t Time: {}\n'.format(args.arch, args.learning_rate,
args.synap_stgth, acc_mean, fgt_mean, str(time_spent)))
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
if __name__ == '__main__':
main()
| 37,561 | 48.816976 | 201 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/continual_learning_algorithms/utils/er_utils.py | import numpy as np
def update_reservior(images, labels, episodic_images, episodic_labels, M, N):
"""
Update the episodic memory with current example using the reservior sampling
"""
for er_x, er_y in zip(images, labels):
if M > N:
episodic_images[N] = er_x
episodic_labels[N] = er_y
else:
j = np.random.randint(0, N)
if j < M:
episodic_images[j] = er_x
episodic_labels[j] = er_y
N += 1
return N
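# A minimal usage sketch for the reservoir update above. The buffer size M and
# the toy shapes are hypothetical; the point is that the buffer never grows
# beyond M rows while N keeps counting every example seen, which is what makes
# the retained sample (approximately) uniform over the stream.
def _reservoir_sampling_sketch():
    M = 5                                    # episodic memory size
    episodic_images = np.zeros((M, 3))       # toy 3-dimensional "images"
    episodic_labels = np.zeros((M, 2))       # toy one-hot labels
    N = 0                                    # examples seen so far
    for _ in range(20):
        batch_x = np.random.rand(4, 3)
        batch_y = np.eye(2)[np.random.randint(0, 2, size=4)]
        N = update_reservior(batch_x, batch_y, episodic_images, episodic_labels, M, N)
    return episodic_images, episodic_labels, N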
def update_fifo_buffer(images, labels, episodic_images, episodic_labels, task_labels, mem_per_class, count_cls, N):
for er_x, er_y in zip(images, labels):
cls = np.unique(np.nonzero(er_y))[-1]
# Write the example at the location pointed by count_cls[cls]
cls_to_index_map = np.where(np.array(task_labels) == cls)[0][0]
with_in_task_offset = mem_per_class * cls_to_index_map
mem_index = count_cls[cls] + with_in_task_offset + N
episodic_images[mem_index] = er_x
episodic_labels[mem_index] = er_y
count_cls[cls] = (count_cls[cls] + 1) % mem_per_class
return
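# A small sketch of the ring-buffer index arithmetic in update_fifo_buffer
# (all numbers are made up). Memory for the current task starts at offset N;
# within it, each class in task_labels owns a contiguous block of
# mem_per_class slots that is overwritten in FIFO order.
def _fifo_index_sketch():
    task_labels = [4, 7, 9]      # classes of the current task
    mem_per_class = 3
    N = 12                       # slots already occupied by earlier tasks
    count_cls = {4: 0, 7: 0, 9: 0}
    written = []
    for cls in [7, 7, 7, 7, 9]:  # stream of incoming class labels
        cls_to_index_map = task_labels.index(cls)
        mem_index = count_cls[cls] + mem_per_class * cls_to_index_map + N
        written.append(mem_index)
        count_cls[cls] = (count_cls[cls] + 1) % mem_per_class
    # written == [15, 16, 17, 15, 18]: the fourth '7' wraps around and
    # overwrites slot 15, while class 9 starts its own block at slot 18.
    return written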
def er_mem_update_hindsight(model, sess, x_hat_batch, y_hat_batch, episodic_images, episodic_labels, episodic_filled_counter,
task_labels, logit_mask, phi_hat, avg_img_vectors, args, loop_over_mem=50):
"""
Update the episodic memory using hindsight
"""
# Store the current estimate of the parameters in the star_params
sess.run(model.set_star_vars)
# Train on the episodic memory to get the new estimate of the parameters
batch_size = 10
samples_at_a_time = episodic_filled_counter if (episodic_filled_counter <= batch_size) else batch_size
for jj in range(loop_over_mem):
mem_indices = np.random.choice(episodic_filled_counter, samples_at_a_time, replace=False)
train_x = episodic_images[mem_indices]
train_y = episodic_labels[mem_indices]
feed_dict = {model.x: train_x, model.y_: train_y, model.keep_prob: 1.0,
model.learning_rate: args.learning_rate}
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
if jj % 5 == 0:
print('Hindsight loss:{}'.format(loss))
    # Update these synthetic samples by maximizing the forgetting loss while maintaining good performance on the current task
for jj, cls in enumerate(task_labels):
y_hat_dense = np.repeat(cls, 1)
y_hat_one_hot = _dense_to_one_hot(y_hat_dense, model.total_classes)
# Initialize the anchor for this task this class
sess.run(model.anchor_xx.assign(np.expand_dims(avg_img_vectors[cls], axis=0)))
for ii in range(100):
feed_dict = {model.y_: y_hat_one_hot, model.phi_hat_reference: phi_hat[cls] , model.keep_prob: 1.0}
feed_dict[model.output_mask] = logit_mask
fgt_loss, phi_dist, total_loss, _ = sess.run([model.negForgetting_loss, model.phi_distance, model.hindsight_objective, model.update_hindsight_anchor], feed_dict=feed_dict)
if ii%100 == 0:
print('Fgt_loss: {}\t Phi_dist: {}\t Total: {}'.format(fgt_loss, phi_dist, total_loss))
# Store the learned images in the episodic memory
offset = jj
class_x_hat = sess.run(model.anchor_xx)
x_hat_batch[jj] = class_x_hat
y_hat_batch[jj] = y_hat_one_hot
# Restore the weights
sess.run(model.restore_weights)
return x_hat_batch, y_hat_batch
def update_avg_image_vectors(train_x, train_y, avg_img_vectors, running_alpha=0.5):
"""
Updates the average image vectors
avg_img_vectors => TOTAL_CLASSES x H x W x C
"""
# For each label in the batch, update the corresponding avg_image_vector
num_examples = train_x.shape[0]
for ii in range(num_examples):
yy = train_y[ii]
cls = np.nonzero(yy)
avg_img_vectors[cls] -= (1 - running_alpha) * (avg_img_vectors[cls] - train_x[ii]) # running average
return
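# A scalar sketch of the update used in update_avg_image_vectors (values are
# illustrative). The in-place form avg -= (1 - alpha) * (avg - x) is the usual
# exponential moving average avg = alpha * avg + (1 - alpha) * x.
def _running_average_sketch(running_alpha=0.5):
    avg = 0.0
    for x in [4.0, 2.0, 6.0]:
        avg -= (1 - running_alpha) * (avg - x)
    # avg == 4.0 here: 0 -> 2.0 -> 2.0 -> 4.0
    return avg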
# -------------------------- Internal APIs ----------------------------------------------------------------------------------------
def _dense_to_one_hot(labels_dense, num_classes):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
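# A quick check of the flat-index trick in _dense_to_one_hot (toy labels):
# writing 1 at position i * num_classes + label_i of the flattened array is
# equivalent to indexing an identity matrix with the dense labels.
def _one_hot_sketch():
    labels_dense = np.array([2, 0, 1])
    one_hot = _dense_to_one_hot(labels_dense, 3)
    assert np.array_equal(one_hot, np.eye(3)[labels_dense])
    return one_hot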
def _get_indices_of_class_examples(train_y, cls):
"""
Returns the indies of examples with given class label
"""
global_class_indices = np.column_stack(np.nonzero(train_y))
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
return class_indices | 4,885 | 42.625 | 183 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/continual_learning_algorithms/utils/vgg_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import tensorflow as tf
import numpy as np
def vgg_conv_layer(x, kernel_size, out_channels, stride, var_list, pad="SAME", name="conv"):
"""
Define API for conv operation. This includes kernel declaration and
conv operation both followed by relu.
"""
in_channels = x.get_shape().as_list()[-1]
with tf.variable_scope(name):
#n = kernel_size * kernel_size * out_channels
n = kernel_size * in_channels
stdv = 1.0 / math.sqrt(n)
w = tf.get_variable('kernel_weights', [kernel_size, kernel_size, in_channels, out_channels],
tf.float32,
initializer=tf.random_uniform_initializer(-stdv, stdv))
b = tf.get_variable('kernel_biases', [out_channels], tf.float32, initializer=tf.random_uniform_initializer(-stdv, stdv))
# Append the variable to the trainable variables list
var_list.append(w)
var_list.append(b)
# Do the convolution operation
bias = tf.nn.bias_add(tf.nn.conv2d(x, w, [1, stride, stride, 1], padding=pad), b)
relu = tf.nn.relu(bias)
return relu
def vgg_fc_layer(x, out_dim, var_list, apply_relu=True, name="fc"):
"""
Define API for the fully connected layer. This includes both the variable
declaration and matmul operation.
"""
in_dim = x.get_shape().as_list()[1]
stdv = 1.0 / math.sqrt(in_dim)
with tf.variable_scope(name):
# Define the weights and biases for this layer
w = tf.get_variable('weights', [in_dim, out_dim], tf.float32,
initializer=tf.random_uniform_initializer(-stdv, stdv))
b = tf.get_variable('biases', [out_dim], tf.float32, initializer=tf.random_uniform_initializer(-stdv, stdv))
# Append the variable to the trainable variables list
var_list.append(w)
var_list.append(b)
# Do the FC operation
output = tf.matmul(x, w) + b
# Apply relu if needed
if apply_relu:
output = tf.nn.relu(output)
return output
| 2,219 | 36.627119 | 128 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/continual_learning_algorithms/utils/utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Define some utility functions
"""
import numpy as np
import tensorflow as tf
def clone_variable_list(variable_list):
"""
Clone the variable list
"""
return [tf.identity(var) for var in variable_list]
def create_fc_layer(input, w, b, apply_relu=True):
"""
Construct a Fully Connected layer
Args:
w Weights
b Biases
apply_relu Apply relu (T/F)?
Returns:
Output of an FC layer
"""
with tf.name_scope('fc_layer'):
output = tf.matmul(input, w) + b
# Apply relu
if apply_relu:
output = tf.nn.relu(output)
return output
def create_conv_layer(input, w, b, stride=1, apply_relu=True):
"""
Construct a convolutional layer
Args:
w Weights
b Biases
pre_activations List where the pre_activations will be stored
apply_relu Apply relu (T/F)?
Returns:
Output of a conv layer
"""
with tf.name_scope('conv_layer'):
# Do the convolution operation
output = tf.nn.conv2d(input, w, [1, stride, stride, 1], padding='SAME') + b
# Apply relu
if apply_relu:
output = tf.nn.relu(output)
return output
def load_task_specific_data_in_proportion(datasets, task_labels, classes_appearing_in_tasks, class_seen_already):
"""
Loads task specific data from the datasets proportionate to classes appearing in different tasks
"""
global_class_indices = np.column_stack(np.nonzero(datasets['labels']))
count = 0
for cls in task_labels:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
total_class_instances = class_indices.size
num_instances_to_choose = total_class_instances // classes_appearing_in_tasks[cls]
offset = (class_seen_already[cls] - 1) * num_instances_to_choose
final_class_indices = class_indices[offset: offset+num_instances_to_choose]
else:
current_class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
total_class_instances = current_class_indices.size
num_instances_to_choose = total_class_instances // classes_appearing_in_tasks[cls]
offset = (class_seen_already[cls] - 1) * num_instances_to_choose
final_class_indices = np.append(final_class_indices, current_class_indices[offset: offset+num_instances_to_choose])
count += 1
final_class_indices = np.sort(final_class_indices, axis=None)
return datasets['images'][final_class_indices, :], datasets['labels'][final_class_indices, :]
def load_task_specific_data(datasets, task_labels):
"""
Loads task specific data from the datasets
"""
global_class_indices = np.column_stack(np.nonzero(datasets['labels']))
count = 0
for cls in task_labels:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
return datasets['images'][class_indices, :], datasets['labels'][class_indices, :]
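# A small sketch of the (row, class) index extraction used above and in several
# helpers below (toy one-hot labels). np.column_stack(np.nonzero(labels)) gives
# one (example_index, class_index) pair per nonzero entry, which is then
# filtered by class and reduced to the example indices.
def _class_index_sketch():
    labels = np.array([[1, 0, 0],
                       [0, 0, 1],
                       [1, 0, 0]])
    pairs = np.column_stack(np.nonzero(labels))
    # pairs == [[0, 0], [1, 2], [2, 0]]
    rows_of_class_0 = np.squeeze(pairs[pairs[:, 1] == 0][:, np.array([True, False])])
    # rows_of_class_0 == [0, 2]
    return pairs, rows_of_class_0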
def samples_for_each_class(dataset_labels, task):
"""
    Number of samples for each class in the task
Args:
dataset_labels Labels to count samples from
task Labels with in a task
    Returns:
"""
num_samples = np.zeros([len(task)], dtype=np.float32)
i = 0
for label in task:
global_class_indices = np.column_stack(np.nonzero(dataset_labels))
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
num_samples[i] = len(class_indices)
i += 1
return num_samples
def get_sample_weights(labels, tasks):
weights = np.zeros([labels.shape[0]], dtype=np.float32)
for label in tasks:
global_class_indices = np.column_stack(np.nonzero(labels))
class_indices = np.array(np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])]))
total_class_samples = class_indices.shape[0]
weights[class_indices] = 1.0/ total_class_samples
    # Rescale the weights so that the minimum is 1. This gives the most frequently
    # observed class a weight of 1 and rarer classes proportionally larger weights.
weights /= weights.min()
return weights
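# A tiny numeric sketch of the inverse-frequency weighting above (toy labels).
# With four examples of class 0 and two of class 1, the raw weights are 1/4 and
# 1/2; after dividing by the minimum, the frequent class gets weight 1 and the
# rare class gets weight 2.
def _sample_weights_sketch():
    labels = np.array([[1, 0]] * 4 + [[0, 1]] * 2, dtype=np.float32)
    weights = get_sample_weights(labels, [0, 1])
    # weights == [1., 1., 1., 1., 2., 2.]
    return weights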
def update_episodic_memory_with_less_data(task_dataset, importance_array, total_mem_size, task, episodic_images, episodic_labels, task_labels=None, is_herding=False):
"""
Update the episodic memory when the task data is less than the memory size
Args:
Returns:
"""
num_examples_in_task = task_dataset['images'].shape[0]
# Empty spaces in the episodic memory
empty_spaces = np.sum(np.sum(episodic_labels, axis=1) == 0)
if empty_spaces >= num_examples_in_task:
# Find where the empty spaces are in order
empty_indices = np.where(np.sum(episodic_labels, axis=1) == 0)[0]
# Store the whole task data in the episodic memory
episodic_images[empty_indices[:num_examples_in_task]] = task_dataset['images']
episodic_labels[empty_indices[:num_examples_in_task]] = task_dataset['labels']
elif empty_spaces == 0:
# Compute the amount of space in the episodic memory for the new task
space_for_new_task = total_mem_size// (task + 1) # task 0, 1, ...
# Get the indices to update in the episodic memory
eps_mem_indices = np.random.choice(total_mem_size, space_for_new_task, replace=False) # Sample without replacement
# Get the indices of important samples from the task dataset
label_importance = importance_array + 1e-32
label_importance /= np.sum(label_importance) # Convert to a probability distribution
task_mem_indices = np.random.choice(num_examples_in_task, space_for_new_task, p=label_importance, replace=False) # Sample without replacement
# Update the episodic memory
episodic_images[eps_mem_indices] = task_dataset['images'][task_mem_indices]
episodic_labels[eps_mem_indices] = task_dataset['labels'][task_mem_indices]
else:
# When there is some free space but not enough to store the whole task
# Find where the empty spaces are in order
empty_indices = np.where(np.sum(episodic_labels, axis=1) == 0)[0]
# Store some of the examples from task in the memory
episodic_images[empty_indices] = task_dataset['images'][:len(empty_indices)]
episodic_labels[empty_indices] = task_dataset['labels'][:len(empty_indices)]
        # Adjust the remaining samples in the episodic memory
space_for_new_task = (total_mem_size // (task + 1)) - len(empty_indices) # task 0, 1, ...
# Get the indices to update in the episodic memory
eps_mem_indices = np.random.choice((total_mem_size - len(empty_indices)), space_for_new_task, replace=False) # Sample without replacement
# Get the indices of important samples from the task dataset
label_importance = importance_array[len(empty_indices):] + 1e-32
label_importance /= np.sum(label_importance) # Convert to a probability distribution
updated_num_examples_in_task = num_examples_in_task - len(empty_indices)
task_mem_indices = np.random.choice(updated_num_examples_in_task, space_for_new_task, p=label_importance, replace=False) # Sample without replacement
task_mem_indices += len(empty_indices) # Add the offset
# Update the episodic memory
episodic_images[eps_mem_indices] = task_dataset['images'][task_mem_indices]
episodic_labels[eps_mem_indices] = task_dataset['labels'][task_mem_indices]
def update_episodic_memory(task_dataset, importance_array, total_mem_size, task, episodic_images, episodic_labels, task_labels=None, is_herding=False):
"""
Update the episodic memory with new task data
Args:
    Returns:
"""
num_examples_in_task = task_dataset['images'].shape[0]
# Compute the amount of space in the episodic memory for the new task
space_for_new_task = total_mem_size// (task + 1) # task 0, 1, ...
# Get the indices to update in the episodic memory
eps_mem_indices = np.random.choice(total_mem_size, space_for_new_task, replace=False) # Sample without replacement
if is_herding and task_labels is not None:
# Get the samples based on herding
imp_images, imp_labels = sample_from_dataset_icarl(task_dataset, importance_array, task_labels, space_for_new_task//len(task_labels))
episodic_images[eps_mem_indices[np.arange(imp_images.shape[0])]] = imp_images
episodic_labels[eps_mem_indices[np.arange(imp_images.shape[0])]] = imp_labels
else:
# Get the indices of important samples from the task dataset
label_importance = importance_array + 1e-32
label_importance /= np.sum(label_importance) # Convert to a probability distribution
task_mem_indices = np.random.choice(num_examples_in_task, space_for_new_task, p=label_importance, replace=False) # Sample without replacement
# Update the episodic memory
episodic_images[eps_mem_indices] = task_dataset['images'][task_mem_indices]
episodic_labels[eps_mem_indices] = task_dataset['labels'][task_mem_indices]
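# A quick sketch of the memory partitioning above (hypothetical sizes). When
# task t (0-indexed) arrives, total_mem_size // (t + 1) uniformly sampled slots
# of the episodic memory are handed over to it, so earlier tasks gradually give
# up an equal share of the buffer.
def _memory_partition_sketch(total_mem_size=120, num_tasks=4):
    # == [120, 60, 40, 30] for the defaults above
    return [total_mem_size // (t + 1) for t in range(num_tasks)]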
def sample_from_dataset(dataset, importance_array, task, samples_count, preds=None):
"""
Samples from a dataset based on a probability distribution
Args:
dataset Dataset to sample from
importance_array Importance scores (not necessarily have to be a prob distribution)
task Labels with in a task
samples_count Number of samples to return
Return:
images Important images
labels Important labels
"""
count = 0
# For each label in the task extract the important samples
for label in task:
global_class_indices = np.column_stack(np.nonzero(dataset['labels']))
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
if (preds is not None):
# Find the indices where prediction match the correct label
pred_indices = np.where(preds == label)[0]
# Find the correct prediction indices
correct_pred_indices = np.intersect1d(pred_indices, class_indices)
else:
correct_pred_indices = class_indices
# Extract the importance for the label
label_importance = importance_array[correct_pred_indices] + 1e-32
label_importance /= np.sum(label_importance)
actual_samples_count = min(samples_count, np.count_nonzero(label_importance))
#print('Storing {} samples from {} class'.format(actual_samples_count, label))
# If no samples are correctly classified then skip saving the samples
if (actual_samples_count != 0):
# Extract the important indices
imp_indices = np.random.choice(correct_pred_indices, actual_samples_count, p=label_importance, replace=False)
if count == 0:
images = dataset['images'][imp_indices]
labels = dataset['labels'][imp_indices]
else:
images = np.vstack((images, dataset['images'][imp_indices]))
labels = np.vstack((labels, dataset['labels'][imp_indices]))
count += 1
if count != 0:
return images, labels
else:
return None, None
def concatenate_datasets(current_images, current_labels, prev_images, prev_labels):
"""
    Concatenates the current dataset with the previous one. This will be used for
adding important samples from the previous datasets
Args:
current_images Images of current dataset
current_labels Labels of current dataset
prev_images List containing images of previous datasets
prev_labels List containing labels of previous datasets
Returns:
images Concatenated images
labels Concatenated labels
"""
"""
images = current_images
labels = current_labels
for i in range(len(prev_images)):
images = np.vstack((images, prev_images[i]))
labels = np.vstack((labels, prev_labels[i]))
"""
images = np.concatenate((current_images, prev_images), axis=0)
labels = np.concatenate((current_labels, prev_labels), axis=0)
return images, labels
def sample_from_dataset_icarl(dataset, features, task, samples_count, preds=None):
"""
    Samples from a dataset using iCaRL-style herding (mean of features)
Args:
dataset Dataset to sample from
features Features - activation before the last layer
task Labels with in a task
samples_count Number of samples to return
Return:
images Important images
labels Important labels
"""
print('Herding based sampling!')
#samples_count = min(samples_count, dataset['images'].shape[0])
count = 0
# For each label in the task extract the important samples
for label in task:
global_class_indices = np.column_stack(np.nonzero(dataset['labels']))
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
if (preds is not None):
# Find the indices where prediction match the correct label
pred_indices = np.where(preds == label)[0]
# Find the correct prediction indices
correct_pred_indices = np.intersect1d(pred_indices, class_indices)
else:
correct_pred_indices = class_indices
mean_feature = np.mean(features[correct_pred_indices, :], axis=0)
actual_samples_count = min(samples_count, len(correct_pred_indices))
# If no samples are correctly classified then skip saving the samples
imp_indices = np.zeros(actual_samples_count, dtype=np.int32)
sample_sum= np.zeros(mean_feature.shape)
if (actual_samples_count != 0):
# Extract the important indices
for i in range(actual_samples_count):
sample_mean = (features[correct_pred_indices, :] +
np.tile(sample_sum, [len(correct_pred_indices),1]))/ float(i + 1)
norm_distance = np.linalg.norm((np.tile(mean_feature, [len(correct_pred_indices),1])
- sample_mean), ord=2, axis=1)
imp_indices[i] = correct_pred_indices[np.argmin(norm_distance)]
sample_sum = sample_sum + features[imp_indices[i], :]
if count == 0:
images = dataset['images'][imp_indices]
labels = dataset['labels'][imp_indices]
else:
images = np.vstack((images, dataset['images'][imp_indices]))
labels = np.vstack((labels, dataset['labels'][imp_indices]))
count += 1
if count != 0:
return images, labels
else:
return None, None
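# A compact sketch of the herding selection above, on made-up 2-D features of a
# single class. At each step it picks the example whose inclusion keeps the
# running mean of the selected features closest to the true class mean, which
# is the same greedy criterion as in sample_from_dataset_icarl.
def _herding_sketch(features, samples_count):
    mean_feature = np.mean(features, axis=0)
    selected = []
    sample_sum = np.zeros_like(mean_feature)
    for i in range(samples_count):
        candidate_means = (features + sample_sum) / float(i + 1)
        distances = np.linalg.norm(mean_feature - candidate_means, ord=2, axis=1)
        best = int(np.argmin(distances))
        selected.append(best)
        sample_sum = sample_sum + features[best]
    return selected
# Example (hypothetical data): _herding_sketch(np.random.rand(10, 2), 3)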
def average_acc_stats_across_runs(data, key):
"""
Compute the average accuracy statistics (mean and std) across runs
"""
num_runs = data.shape[0]
avg_acc = np.zeros(num_runs)
for i in range(num_runs):
avg_acc[i] = np.mean(data[i][-1])
return avg_acc.mean()*100, avg_acc.std()*100
def average_fgt_stats_across_runs(data, key):
"""
Compute the forgetting statistics (mean and std) across runs
"""
num_runs = data.shape[0]
fgt = np.zeros(num_runs)
wst_fgt = np.zeros(num_runs)
for i in range(num_runs):
fgt[i] = compute_fgt(data[i])
return fgt.mean(), fgt.std()
def compute_fgt(data):
"""
Given a TxT data matrix, compute average forgetting at T-th task
"""
num_tasks = data.shape[0]
T = num_tasks - 1
fgt = 0.0
for i in range(T):
fgt += np.max(data[:T,i]) - data[T, i]
avg_fgt = fgt/ float(num_tasks - 1)
return avg_fgt
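# A worked example for the forgetting metric above (made-up accuracies). Rows
# are evaluation snapshots after training each task, columns are tasks.
def _fgt_example():
    acc = np.array([[0.9, 0.0, 0.0],
                    [0.8, 0.7, 0.0],
                    [0.6, 0.6, 0.8]])
    # Task 0: best earlier accuracy 0.9, final 0.6 -> forgetting 0.3
    # Task 1: best earlier accuracy 0.7, final 0.6 -> forgetting 0.1
    # compute_fgt(acc) == (0.3 + 0.1) / 2 ~= 0.2
    return compute_fgt(acc)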
def update_reservior(current_image, current_label, episodic_images, episodic_labels, M, N):
"""
Update the episodic memory with current example using the reservior sampling
"""
if M > N:
episodic_images[N] = current_image
episodic_labels[N] = current_label
else:
j = np.random.randint(0, N)
if j < M:
episodic_images[j] = current_image
episodic_labels[j] = current_label
| 17,295 | 41.600985 | 166 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/continual_learning_algorithms/utils/data_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Define utility functions for manipulating datasets
"""
import os
import numpy as np
import sys
from copy import deepcopy
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
import tarfile
import zipfile
import random
import cv2
from scipy import ndimage
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
IMG_MEAN = np.array((103.94,116.78,123.68), dtype=np.float32)
############################################################
### Data augmentation utils ################################
############################################################
def image_scaling(images):
"""
Randomly scales the images between 0.5 to 1.5 times the original size.
Args:
images: Training images to scale.
"""
scale = tf.random_uniform([1], minval=0.5, maxval=1.5, dtype=tf.float32, seed=None)
h_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(images)[1]), scale))
w_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(images)[2]), scale))
new_shape = tf.squeeze(tf.stack([h_new, w_new]), squeeze_dims=[1])
images = tf.image.resize_images(images, new_shape)
result = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), images)
return result
def random_crop_and_pad_image(images, crop_h, crop_w):
"""
Randomly crop and pads the input images.
Args:
      images: Training images to crop/pad.
crop_h: Height of cropped segment.
crop_w: Width of cropped segment.
"""
image_shape = tf.shape(images)
image_pad = tf.image.pad_to_bounding_box(images, 0, 0, tf.maximum(crop_h, image_shape[1]), tf.maximum(crop_w, image_shape[2]))
img_crop = tf.map_fn(lambda img: tf.random_crop(img, [crop_h,crop_w,3]), image_pad)
return img_crop
def random_horizontal_flip(x):
"""
Randomly flip a batch of images horizontally
Args:
x Tensor of shape B x H x W x C
Returns:
random_flipped Randomly flipped tensor of shape B x H x W x C
"""
# Define random horizontal flip
flips = [(slice(None, None, None), slice(None, None, random.choice([-1, None])), slice(None, None, None))
             for _ in range(x.shape[0])]
random_flipped = np.array([img[flip] for img, flip in zip(x, flips)])
return random_flipped
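# A small sketch of the slice trick used in random_horizontal_flip (toy 2x2
# single-channel image). A step of -1 on the width axis reverses the columns,
# i.e. flips the image horizontally, while None leaves it untouched.
def _flip_slice_sketch():
    img = np.arange(4).reshape(2, 2, 1)        # H x W x C
    flip = (slice(None), slice(None, None, -1), slice(None))
    flipped = img[flip]
    # flipped[..., 0] == [[1, 0], [3, 2]]
    return flipped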
############################################################
### CIFAR download utils ###################################
############################################################
CIFAR_10_URL = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
CIFAR_100_URL = "http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
CIFAR_10_DIR = "/cifar_10"
CIFAR_100_DIR = "/cifar_100"
def construct_split_cifar(task_labels, is_cifar_100=True):
"""
Construct Split CIFAR-10 and CIFAR-100 datasets
Args:
task_labels Labels of different tasks
data_dir Data directory where the CIFAR data will be saved
"""
data_dir = 'CIFAR_data'
# Get the cifar dataset
cifar_data = _get_cifar(data_dir, is_cifar_100)
# Define a list for storing the data for different tasks
datasets = []
# Data splits
sets = ["train", "validation", "test"]
for task in task_labels:
for set_name in sets:
this_set = cifar_data[set_name]
global_class_indices = np.column_stack(np.nonzero(this_set[1]))
count = 0
for cls in task:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] ==
cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
if set_name == "train":
train = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "validation":
validation = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "test":
test = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
cifar = {
'train': train,
'validation': validation,
'test': test,
}
datasets.append(cifar)
return datasets
def _get_cifar(data_dir, is_cifar_100):
"""
Get the CIFAR-10 and CIFAR-100 datasets
Args:
data_dir Directory where the downloaded data will be stored
"""
x_train = None
y_train = None
x_validation = None
y_validation = None
x_test = None
y_test = None
l = None
# Download the dataset if needed
_cifar_maybe_download_and_extract(data_dir)
# Dictionary to store the dataset
dataset = dict()
dataset['train'] = []
dataset['validation'] = []
dataset['test'] = []
def dense_to_one_hot(labels_dense, num_classes=100):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
if is_cifar_100:
# Load the training data of CIFAR-100
f = open(data_dir + CIFAR_100_DIR + '/train', 'rb')
datadict = pickle.load(f, encoding='latin1')
f.close()
_X = datadict['data']
_Y = np.array(datadict['fine_labels'])
_Y = dense_to_one_hot(_Y, num_classes=100)
_X = np.array(_X, dtype=float) / 255.0
_X = _X.reshape([-1, 3, 32, 32])
_X = _X.transpose([0, 2, 3, 1])
# Compute the data mean for normalization
x_train_mean = np.mean(_X, axis=0)
x_train = _X[:40000]
y_train = _Y[:40000]
x_validation = _X[40000:]
y_validation = _Y[40000:]
else:
# Load all the training batches of the CIFAR-10
for i in range(5):
f = open(data_dir + CIFAR_10_DIR + '/data_batch_' + str(i + 1), 'rb')
datadict = pickle.load(f, encoding='latin1')
f.close()
_X = datadict['data']
_Y = np.array(datadict['labels'])
_Y = dense_to_one_hot(_Y, num_classes=10)
_X = np.array(_X, dtype=float) / 255.0
_X = _X.reshape([-1, 3, 32, 32])
_X = _X.transpose([0, 2, 3, 1])
if x_train is None:
x_train = _X
y_train = _Y
else:
x_train = np.concatenate((x_train, _X), axis=0)
y_train = np.concatenate((y_train, _Y), axis=0)
# Compute the data mean for normalization
x_train_mean = np.mean(x_train, axis=0)
        x_validation = x_train[40000:] # We don't use validation set with CIFAR-10
        y_validation = y_train[40000:]
# Normalize the train and validation sets
x_train -= x_train_mean
x_validation -= x_train_mean
dataset['train'].append(x_train)
dataset['train'].append(y_train)
dataset['train'].append(l)
dataset['validation'].append(x_validation)
dataset['validation'].append(y_validation)
dataset['validation'].append(l)
if is_cifar_100:
# Load the test batch of CIFAR-100
f = open(data_dir + CIFAR_100_DIR + '/test', 'rb')
datadict = pickle.load(f, encoding='latin1')
f.close()
_X = datadict['data']
_Y = np.array(datadict['fine_labels'])
_Y = dense_to_one_hot(_Y, num_classes=100)
else:
# Load the test batch of CIFAR-10
f = open(data_dir + CIFAR_10_DIR + '/test_batch', 'rb')
datadict = pickle.load(f, encoding='latin1')
f.close()
_X = datadict["data"]
_Y = np.array(datadict['labels'])
_Y = dense_to_one_hot(_Y, num_classes=10)
_X = np.array(_X, dtype=float) / 255.0
_X = _X.reshape([-1, 3, 32, 32])
_X = _X.transpose([0, 2, 3, 1])
x_test = _X
y_test = _Y
# Normalize the test set
x_test -= x_train_mean
dataset['test'].append(x_test)
dataset['test'].append(y_test)
dataset['test'].append(l)
return dataset
def _print_download_progress(count, block_size, total_size):
"""
Show the download progress of the cifar data
"""
pct_complete = float(count * block_size) / total_size
msg = "\r- Download progress: {0:.1%}".format(pct_complete)
sys.stdout.write(msg)
sys.stdout.flush()
def _cifar_maybe_download_and_extract(data_dir):
"""
Routine to download and extract the cifar dataset
Args:
data_dir Directory where the downloaded data will be stored
"""
cifar_10_directory = data_dir + CIFAR_10_DIR
cifar_100_directory = data_dir + CIFAR_100_DIR
# If the data_dir does not exist, create the directory and download
# the data
if not os.path.exists(data_dir):
os.makedirs(data_dir)
url = CIFAR_10_URL
filename = url.split('/')[-1]
file_path = os.path.join(data_dir, filename)
zip_cifar_10 = file_path
file_path, _ = urlretrieve(url=url, filename=file_path, reporthook=_print_download_progress)
print()
print("Download finished. Extracting files.")
if file_path.endswith(".zip"):
zipfile.ZipFile(file=file_path, mode="r").extractall(data_dir)
elif file_path.endswith((".tar.gz", ".tgz")):
tarfile.open(name=file_path, mode="r:gz").extractall(data_dir)
print("Done.")
url = CIFAR_100_URL
filename = url.split('/')[-1]
file_path = os.path.join(data_dir, filename)
zip_cifar_100 = file_path
file_path, _ = urlretrieve(url=url, filename=file_path, reporthook=_print_download_progress)
print()
print("Download finished. Extracting files.")
if file_path.endswith(".zip"):
zipfile.ZipFile(file=file_path, mode="r").extractall(data_dir)
elif file_path.endswith((".tar.gz", ".tgz")):
tarfile.open(name=file_path, mode="r:gz").extractall(data_dir)
print("Done.")
os.rename(data_dir + "/cifar-10-batches-py", cifar_10_directory)
os.rename(data_dir + "/cifar-100-python", cifar_100_directory)
os.remove(zip_cifar_10)
os.remove(zip_cifar_100)
#########################################
## MNIST Utils ##########################
#########################################
def reformat_mnist(datasets):
"""
Routine to Reformat the mnist dataset into a 3d tensor
"""
image_size = 28 # Height of MNIST dataset
num_channels = 1 # Gray scale
for i in range(len(datasets)):
sets = ["train", "validation", "test"]
for set_name in sets:
datasets[i]['%s'%set_name]['images'] = datasets[i]['%s'%set_name]['images'].reshape\
((-1, image_size, image_size, num_channels)).astype(np.float32)
return datasets
def rotate_image_by_angle(img, angle=45):
WIDTH, HEIGHT = 28 , 28
img = img.reshape((WIDTH, HEIGHT))
img = ndimage.rotate(img, angle, reshape=False, order=0)
out = np.array(img).flatten()
return out
def construct_rotate_mnist(num_tasks):
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
datasets = []
for i in range(num_tasks):
per_task_rotation = 180.0 / num_tasks
rotation_degree = (i - 1)*per_task_rotation
rotation_degree -= (np.random.random()*per_task_rotation)
copied_mnist = deepcopy(mnist)
sets = ["train", "validation", "test"]
for set_name in sets:
this_set = getattr(copied_mnist, set_name) # shallow copy
rotate_image_by_angle(this_set._images[0])
this_set._images = np.array([rotate_image_by_angle(img, rotation_degree) for img in this_set._images])
if set_name == "train":
train = {
'images':this_set._images,
'labels':this_set.labels,
}
elif set_name == "validation":
validation = {
'images':this_set._images,
'labels':this_set.labels,
}
elif set_name == "test":
test = {
'images':this_set._images,
'labels':this_set.labels,
}
dataset = {
'train': train,
'validation': validation,
'test': test,
}
datasets.append(dataset)
return datasets
def construct_permute_mnist(num_tasks):
"""
Construct a dataset of permutted mnist images
Args:
num_tasks Number of tasks
Returns
dataset A permutted mnist dataset
"""
# Download and store mnist dataset
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
datasets = []
for i in range(num_tasks):
perm_inds = list(range(mnist.train.images.shape[1]))
np.random.shuffle(perm_inds)
copied_mnist = deepcopy(mnist)
sets = ["train", "validation", "test"]
for set_name in sets:
this_set = getattr(copied_mnist, set_name) # shallow copy
this_set._images = np.transpose(np.array([this_set.images[:,c] for c in perm_inds]))
# print(this_set._images.shape)
if set_name == "train":
train = {
'images':this_set._images,
'labels':this_set.labels,
}
elif set_name == "validation":
validation = {
'images':this_set._images,
'labels':this_set.labels,
}
elif set_name == "test":
test = {
'images':this_set._images,
'labels':this_set.labels,
}
dataset = {
'train': train,
'validation': validation,
'test': test,
}
datasets.append(dataset)
return datasets
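# A toy sketch of the pixel-permutation idea above (4-pixel "images"). Each task
# draws one fixed permutation of pixel indices and applies it to every split, so
# the labels are unchanged while the input distribution differs per task. The
# column-gather below mirrors the construction used in construct_permute_mnist.
def _permute_sketch():
    images = np.array([[0.1, 0.2, 0.3, 0.4],
                       [0.5, 0.6, 0.7, 0.8]])
    perm_inds = list(range(images.shape[1]))
    np.random.shuffle(perm_inds)
    permuted = np.transpose(np.array([images[:, c] for c in perm_inds]))
    return perm_inds, permuted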
def construct_split_mnist(task_labels):
"""
Construct a split mnist dataset
Args:
task_labels List of split labels
Returns:
dataset A list of split datasets
"""
# Download and store mnist dataset
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
datasets = []
sets = ["train", "validation", "test"]
for task in task_labels:
for set_name in sets:
this_set = getattr(mnist, set_name)
global_class_indices = np.column_stack(np.nonzero(this_set.labels))
count = 0
for cls in task:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
if set_name == "train":
train = {
'images':deepcopy(mnist.train.images[class_indices, :]),
'labels':deepcopy(mnist.train.labels[class_indices, :]),
}
elif set_name == "validation":
validation = {
'images':deepcopy(mnist.validation.images[class_indices, :]),
'labels':deepcopy(mnist.validation.labels[class_indices, :]),
}
elif set_name == "test":
test = {
'images':deepcopy(mnist.test.images[class_indices, :]),
'labels':deepcopy(mnist.test.labels[class_indices, :]),
}
mnist2 = {
'train': train,
'validation': validation,
'test': test,
}
datasets.append(mnist2)
return datasets
###################################################
###### ImageNet Utils #############################
###################################################
def construct_split_imagenet(task_labels, data_dir):
"""
Construct Split ImageNet dataset
Args:
task_labels Labels of different tasks
data_dir Data directory from where to load the imagenet data
"""
# Load the imagenet dataset
imagenet_data = _load_imagenet(data_dir)
# Define a list for storing the data for different tasks
datasets = []
# Data splits
sets = ["train", "test"]
for task in task_labels:
for set_name in sets:
this_set = imagenet_data[set_name]
global_class_indices = np.column_stack(np.nonzero(this_set[1]))
count = 0
for cls in task:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] ==
cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
if set_name == "train":
train = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "test":
test = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
imagenet = {
'train': train,
'test': test,
}
datasets.append(imagenet)
return datasets
def _load_imagenet(data_dir):
"""
Load the ImageNet data
Args:
data_dir Directory where the pickle files have been dumped
"""
x_train = None
y_train = None
x_test = None
y_test = None
# Dictionary to store the dataset
dataset = dict()
dataset['train'] = []
dataset['test'] = []
def dense_to_one_hot(labels_dense, num_classes=100):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
# Load the training batches
for i in range(4):
f = open(data_dir + '/train_batch_' + str(i), 'rb')
datadict = pickle.load(f, encoding='latin1')
f.close()
_X = datadict['data']
_Y = np.array(datadict['labels'])
        # Convert the labels to one-hot
_Y = dense_to_one_hot(_Y)
# Normalize the images
_X = np.array(_X, dtype=float)/ 255.0
_X = _X.reshape([-1, 224, 224, 3])
if x_train is None:
x_train = _X
y_train = _Y
else:
x_train = np.concatenate((x_train, _X), axis=0)
y_train = np.concatenate((y_train, _Y), axis=0)
dataset['train'].append(x_train)
dataset['train'].append(y_train)
# Load test batches
for i in range(4):
f = open(data_dir + '/test_batch_' + str(i), 'rb')
datadict = pickle.load(f, encoding='latin1')
f.close()
_X = datadict['data']
_Y = np.array(datadict['labels'])
        # Convert the labels to one-hot
_Y = dense_to_one_hot(_Y)
# Normalize the images
_X = np.array(_X, dtype=float)/ 255.0
_X = _X.reshape([-1, 224, 224, 3])
if x_test is None:
x_test = _X
y_test = _Y
else:
x_test = np.concatenate((x_test, _X), axis=0)
y_test = np.concatenate((y_test, _Y), axis=0)
dataset['test'].append(x_test)
dataset['test'].append(y_test)
return dataset
if __name__ == "__main__":
construct_rotate_mnist(20)
# rotate_image_by_angle(np.array([[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]))
| 21,460 | 31.665145 | 130 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/continual_learning_algorithms/utils/vis_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Define some utility functions
"""
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.colors as colors
import matplotlib.cm as cmx
import matplotlib.pyplot as plt
import matplotlib.figure as figure
from six.moves import cPickle as pickle
def snapshot_experiment_eval(logdir, experiment_id, data):
"""
Store the output of the experiment in a file
"""
snapshot_file = logdir + '/' + experiment_id + '.pickle'
with open(snapshot_file, 'wb') as f:
pickle.dump(data, f)
print('Experimental Eval has been snapshotted to %s!'%(snapshot_file))
def snapshot_task_labels(logdir, experiment_id, data):
"""
    Store the task labels of the experiment in a file
"""
snapshot_file = logdir + '/' + experiment_id + '_task_labels.pickle'
with open(snapshot_file, 'wb') as f:
pickle.dump(data, f)
print('Experimental Eval has been snapshotted to %s!'%(snapshot_file))
def snapshot_experiment_meta_data(logdir, experiment_id, exper_meta_data):
"""
Store the meta-data of the experiment in a file
"""
meta_file = logdir + '/' + experiment_id + '.txt'
with open(meta_file, 'w') as f:
for key in exper_meta_data:
print('{}: {}'.format(key, exper_meta_data[key]))
f.write('{}:{} \n'.format(key, exper_meta_data[key]))
print('Experimental meta-data has been snapshotted to %s!'%(meta_file))
def plot_acc_multiple_runs(data, task_labels, valid_measures, n_stats, plot_name=None):
"""
Plots the accuracies
Args:
task_labels List of tasks
n_stats Number of runs
plot_name Name of the file where the plot will be saved
Returns:
"""
n_tasks = len(task_labels)
plt.figure(figsize=(14, 3))
axs = [plt.subplot(1,n_tasks+1,1)]
for i in range(1, n_tasks + 1):
axs.append(plt.subplot(1, n_tasks+1, i+1, sharex=axs[0], sharey=axs[0]))
fmt_chars = ['o', 's', 'd']
fmts = []
for i in range(len(valid_measures)):
fmts.append(fmt_chars[i%len(fmt_chars)])
plot_keys = sorted(data['mean'].keys())
for k, cval in enumerate(plot_keys):
label = "c=%g"%cval
mean_vals = data['mean'][cval]
std_vals = data['std'][cval]
for j in range(n_tasks+1):
plt.sca(axs[j])
errorbar_kwargs = dict(fmt="%s-"%fmts[k], markersize=5)
if j < n_tasks:
norm= np.sqrt(n_stats) # np.sqrt(n_stats) for SEM or 1 for STDEV
axs[j].errorbar(np.arange(n_tasks)+1, mean_vals[:, j], yerr=std_vals[:, j]/norm, label=label, **errorbar_kwargs)
else:
mean_stuff = []
std_stuff = []
for i in range(len(data['mean'][cval])):
mean_stuff.append(data['mean'][cval][i][:i+1].mean())
std_stuff.append(np.sqrt((data['std'][cval][i][:i+1]**2).sum())/(n_stats*np.sqrt(n_stats)))
plt.errorbar(range(1,n_tasks+1), mean_stuff, yerr=std_stuff, label="%s"%valid_measures[k], **errorbar_kwargs)
plt.xticks(np.arange(n_tasks)+1)
plt.xlim((1.0,5.5))
"""
# Uncomment this if clutter along y-axis needs to be removed
if j == 0:
axs[j].set_yticks([0.5,1])
else:
plt.setp(axs[j].get_yticklabels(), visible=False)
plt.ylim((0.45,1.1))
"""
for i, ax in enumerate(axs):
if i < n_tasks:
ax.set_title((['Task %d (%d to %d)'%(j+1,task_labels[j][0], task_labels[j][-1])\
for j in range(n_tasks)] + ['average'])[i], fontsize=8)
else:
ax.set_title("Average", fontsize=8)
ax.axhline(0.5, color='k', linestyle=':', label="chance", zorder=0)
handles, labels = axs[-1].get_legend_handles_labels()
# Reorder legend so chance is last
axs[-1].legend([handles[j] for j in [i for i in range(len(valid_measures)+1)]],
[labels[j] for j in [i for i in range(len(valid_measures)+1)]], loc='best', fontsize=6)
axs[0].set_xlabel("Tasks")
axs[0].set_ylabel("Accuracy")
plt.gcf().tight_layout()
plt.grid('on')
if plot_name == None:
plt.show()
else:
plt.savefig(plot_name)
def plot_histogram(data, n_bins=10, plot_name='my_hist'):
plt.hist(data, bins=n_bins)
plt.savefig(plot_name)
plt.close()
| 4,627 | 34.875969 | 128 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/continual_learning_algorithms/utils/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .data_utils import construct_permute_mnist, construct_split_mnist, construct_split_cifar, construct_rotate_mnist
from .data_utils import image_scaling, random_crop_and_pad_image, random_horizontal_flip
from .utils import clone_variable_list, create_fc_layer, create_conv_layer, sample_from_dataset, update_episodic_memory, update_episodic_memory_with_less_data, concatenate_datasets
from .utils import samples_for_each_class, sample_from_dataset_icarl, get_sample_weights, compute_fgt, load_task_specific_data, load_task_specific_data_in_proportion
from .utils import average_acc_stats_across_runs, average_fgt_stats_across_runs
from .vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from .resnet_utils import _conv, _fc, _bn, _residual_block, _residual_block_first
from .vgg_utils import vgg_conv_layer, vgg_fc_layer
from .er_utils import update_reservior, update_fifo_buffer, er_mem_update_hindsight, update_avg_image_vectors | 1,214 | 85.785714 | 181 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/continual_learning_algorithms/utils/resnet_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import tensorflow as tf
import numpy as np
def _conv(x, kernel_size, out_channels, stride, var_list, pad="SAME", name="conv"):
"""
Define API for conv operation. This includes kernel declaration and
conv operation both.
"""
in_channels = x.get_shape().as_list()[-1]
with tf.variable_scope(name):
#n = kernel_size * kernel_size * out_channels
n = kernel_size * in_channels
stdv = 1.0 / math.sqrt(n)
w = tf.get_variable('kernel', [kernel_size, kernel_size, in_channels, out_channels],
tf.float32,
initializer=tf.random_uniform_initializer(-stdv, stdv))
#initializer=tf.random_normal_initializer(stddev=np.sqrt(2.0/n)))
# Append the variable to the trainable variables list
var_list.append(w)
# Do the convolution operation
output = tf.nn.conv2d(x, w, [1, stride, stride, 1], padding=pad)
return output
def _fc(x, out_dim, var_list, name="fc", is_cifar=False):
"""
Define API for the fully connected layer. This includes both the variable
declaration and matmul operation.
"""
in_dim = x.get_shape().as_list()[1]
stdv = 1.0 / math.sqrt(in_dim)
with tf.variable_scope(name):
# Define the weights and biases for this layer
w = tf.get_variable('weights', [in_dim, out_dim], tf.float32,
initializer=tf.random_uniform_initializer(-stdv, stdv))
#initializer=tf.truncated_normal_initializer(stddev=0.1))
if is_cifar:
b = tf.get_variable('biases', [out_dim], tf.float32, initializer=tf.random_uniform_initializer(-stdv, stdv))
else:
b = tf.get_variable('biases', [out_dim], tf.float32, initializer=tf.constant_initializer(0))
# Append the variable to the trainable variables list
var_list.append(w)
var_list.append(b)
# Do the FC operation
output = tf.matmul(x, w) + b
return output
def _bn(x, var_list, train_phase, name='bn_'):
"""
Batch normalization on convolutional maps.
    Args:
        x             4-D input tensor (NHWC)
        var_list      List collecting the trainable beta/gamma variables
        train_phase   Boolean tensor: use batch statistics when True, moving averages otherwise
    Returns:
        The batch-normalized tensor
"""
n_out = x.get_shape().as_list()[3]
with tf.variable_scope(name):
beta = tf.get_variable('beta', shape=[n_out], dtype=tf.float32, initializer=tf.constant_initializer(0.0))
gamma = tf.get_variable('gamma', shape=[n_out], dtype=tf.float32, initializer=tf.constant_initializer(1.0))
var_list.append(beta)
var_list.append(gamma)
batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=0.9)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(train_phase,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
return normed
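# A numpy sketch of what the batch-norm op above computes element-wise
# (illustrative values only). During training the batch statistics feed this
# formula directly; at test time the tf.cond branch substitutes their
# exponential moving averages.
def _bn_numpy_sketch(x, mean, var, beta, gamma, eps=1e-3):
    return gamma * (x - mean) / np.sqrt(var + eps) + beta
# Example: _bn_numpy_sketch(np.array([1.0, 2.0]), mean=1.5, var=0.25, beta=0.0, gamma=1.0)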
def _residual_block(x, trainable_vars, train_phase, apply_relu=True, name="unit"):
"""
ResNet block when the number of channels across the skip connections are the same
"""
in_channels = x.get_shape().as_list()[-1]
with tf.variable_scope(name) as scope:
shortcut = x
x = _conv(x, 3, in_channels, 1, trainable_vars, name='conv_1')
x = _bn(x, trainable_vars, train_phase, name="bn_1")
x = tf.nn.relu(x)
x = _conv(x, 3, in_channels, 1, trainable_vars, name='conv_2')
x = _bn(x, trainable_vars, train_phase, name="bn_2")
x = x + shortcut
if apply_relu == True:
x = tf.nn.relu(x)
return x
def _residual_block_first(x, out_channels, strides, trainable_vars, train_phase, apply_relu=True, name="unit", is_ATT_DATASET=False):
"""
A generic ResNet Block
"""
in_channels = x.get_shape().as_list()[-1]
with tf.variable_scope(name) as scope:
# Figure out the shortcut connection first
if in_channels == out_channels:
if strides == 1:
shortcut = tf.identity(x)
else:
shortcut = tf.nn.max_pool(x, [1, strides, strides, 1], [1, strides, strides, 1], 'VALID')
else:
shortcut = _conv(x, 1, out_channels, strides, trainable_vars, name="shortcut")
if not is_ATT_DATASET:
shortcut = _bn(shortcut, trainable_vars, train_phase, name="bn_0")
# Residual block
x = _conv(x, 3, out_channels, strides, trainable_vars, name="conv_1")
x = _bn(x, trainable_vars, train_phase, name="bn_1")
x = tf.nn.relu(x)
x = _conv(x, 3, out_channels, 1, trainable_vars, name="conv_2")
x = _bn(x, trainable_vars, train_phase, name="bn_2")
x = x + shortcut
if apply_relu:
x = tf.nn.relu(x)
return x
| 5,216 | 38.225564 | 133 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/continual_learning_algorithms/model/model.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Model defintion
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
from utils import clone_variable_list, create_fc_layer, create_conv_layer
from utils.resnet_utils import _conv, _fc, _bn, _residual_block, _residual_block_first
from utils.vgg_utils import vgg_conv_layer, vgg_fc_layer
PARAM_XI_STEP = 1e-3
NEG_INF = -1e32
EPSILON = 1e-32
HYBRID_ALPHA = 0.5
TRAIN_ENTROPY_BASED_SUM = False
def weight_variable(shape, name='fc', init_type='default'):
"""
Define weight variables
Args:
shape Shape of the bias variable tensor
Returns:
A tensor of size shape initialized from a random normal
"""
with tf.variable_scope(name):
if init_type == 'default':
weights = tf.get_variable('weights', shape, tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.1))
#weights = tf.Variable(tf.truncated_normal(shape, stddev=0.1), name='weights')
elif init_type == 'zero':
weights = tf.get_variable('weights', shape, tf.float32, initializer=tf.constant_initializer(0.1))
#weights = tf.Variable(tf.constant(0.1, shape=shape, dtype=np.float32), name='weights')
return weights
def bias_variable(shape, name='fc'):
"""
Define bias variables
Args:
shape Shape of the bias variable tensor
Returns:
A tensor of size shape initialized from a constant
"""
with tf.variable_scope(name):
biases = tf.get_variable('biases', shape, initializer=tf.constant_initializer(0.1))
return biases
#return tf.Variable(tf.constant(0.1, shape=shape, dtype=np.float32), name='biases') #TODO: Should we initialize it from 0
class Model:
"""
A class defining the model
"""
def __init__(self, x_train, y_, num_tasks, opt, imp_method, synap_stgth, fisher_update_after, fisher_ema_decay, network_arch='FC-S',
is_ATT_DATASET=False, x_test=None, attr=None, all_args=None):
"""
Instantiate the model
"""
# Define some placeholders which are used to feed the data to the model
self.y_ = y_
if imp_method == 'PNN':
self.train_phase = []
self.total_classes = int(self.y_[0].get_shape()[1])
self.train_phase = [tf.placeholder(tf.bool, name='train_phase_%d'%(i)) for i in range(num_tasks)]
self.output_mask = [tf.placeholder(dtype=tf.float32, shape=[self.total_classes]) for i in range(num_tasks)]
else:
self.total_classes = int(self.y_.get_shape()[1])
self.train_phase = tf.placeholder(tf.bool, name='train_phase')
if (imp_method == 'A-GEM' or imp_method == 'ER') and 'FC-' not in network_arch: # Only for Split-X setups
self.output_mask = [tf.placeholder(dtype=tf.float32, shape=[self.total_classes]) for i in range(num_tasks)]
self.mem_batch_size = tf.placeholder(dtype=tf.float32, shape=())
else:
self.output_mask = tf.placeholder(dtype=tf.float32, shape=[self.total_classes])
self.sample_weights = tf.placeholder(tf.float32, shape=[None])
self.task_id = tf.placeholder(dtype=tf.int32, shape=())
self.store_grad_batches = tf.placeholder(dtype=tf.float32, shape=())
self.keep_prob = tf.placeholder(dtype=tf.float32, shape=())
self.train_samples = tf.placeholder(dtype=tf.float32, shape=())
self.training_iters = tf.placeholder(dtype=tf.float32, shape=())
self.train_step = tf.placeholder(dtype=tf.float32, shape=())
self.violation_count = tf.Variable(0, dtype=tf.float32, trainable=False)
self.is_ATT_DATASET = is_ATT_DATASET # To use a different (standard one) ResNet-18 for CUB
if x_test is not None:
            # If CUB dataset then use augmented x (x_train) for training and non-augmented x (x_test) for testing
self.x = tf.cond(self.train_phase, lambda: tf.identity(x_train), lambda: tf.identity(x_test))
train_shape = x_train.get_shape().as_list()
x = tf.reshape(self.x, [-1, train_shape[1], train_shape[2], train_shape[3]])
else:
# We don't use data augmentation for other datasets
self.x = x_train
x = self.x
# Class attributes for zero shot transfer
self.class_attr = attr
if self.class_attr is not None:
self.attr_dims = int(self.class_attr.get_shape()[1])
# Save the arguments passed from the main script
self.global_step = tf.Variable(0, trainable=False)
self.learning_rate = tf.train.exponential_decay(all_args.learning_rate, self.global_step, 3100, all_args.decay, staircase=True)
self.opt = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)#opt
self.num_tasks = num_tasks
self.imp_method = imp_method
self.fisher_update_after = fisher_update_after
self.fisher_ema_decay = fisher_ema_decay
self.network_arch = network_arch
        # A scalar variable for previous synapse strength
self.synap_stgth = tf.constant(synap_stgth, shape=[1], dtype=tf.float32)
self.triplet_loss_scale = 2.1
# Define different variables
self.weights_old = []
self.star_vars = []
self.small_omega_vars = []
self.big_omega_vars = []
self.big_omega_riemann_vars = []
self.fisher_diagonal_at_minima = []
self.hebbian_score_vars = []
self.running_fisher_vars = []
self.tmp_fisher_vars = []
self.max_fisher_vars = []
self.min_fisher_vars = []
self.max_score_vars = []
self.min_score_vars = []
self.normalized_score_vars = []
self.score_vars = []
self.normalized_fisher_at_minima_vars = []
self.weights_delta_old_vars = []
self.ref_grads = []
self.projected_gradients_list = []
if self.class_attr is not None:
self.loss_and_train_ops_for_attr_vector(x, self.y_)
else:
self.loss_and_train_ops_for_one_hot_vector(x, self.y_)
        # Set the operations to reset the optimizer when needed
self.reset_optimizer_ops()
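    # Illustrative sketch of the learning-rate schedule built above with
    # tf.train.exponential_decay(..., 3100, decay, staircase=True). This helper is a
    # plain-Python restatement added for clarity; it is not used by the graph, and
    # 'base_lr'/'decay_rate' stand for all_args.learning_rate and all_args.decay.
    @staticmethod
    def _staircase_lr_sketch(base_lr, decay_rate, global_step, decay_steps=3100):
        # staircase=True means the exponent only grows after every full block of
        # 'decay_steps' optimizer steps, so the rate drops in discrete jumps.
        return base_lr * (decay_rate ** (global_step // decay_steps))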
####################################################################################
#### Internal APIs of the class. These should not be called/ exposed externally ####
####################################################################################
def loss_and_train_ops_for_one_hot_vector(self, x, y_):
"""
Loss and training operations for the training of one-hot vector based classification model
"""
        # Define the appropriate network
if self.network_arch == 'FC-S':
input_dim = int(x.get_shape()[1])
layer_dims = [input_dim, 256, 256, self.total_classes]
if self.imp_method == 'PNN':
self.task_logits = []
self.task_pruned_logits = []
self.unweighted_entropy = []
for i in range(self.num_tasks):
if i == 0:
self.task_logits.append(self.init_fc_column_progNN(layer_dims, x))
self.task_pruned_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(self.task_logits[i])[0], 1]), self.task_logits[i], NEG_INF*tf.ones_like(self.task_logits[i])))
                        self.unweighted_entropy.append(tf.squeeze(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_[i], logits=self.task_pruned_logits[i])))) # mult by mean(y_[i]) puts unwarranted loss to 0
else:
self.task_logits.append(self.extensible_fc_column_progNN(layer_dims, x, i))
self.task_pruned_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(self.task_logits[i])[0], 1]), self.task_logits[i], NEG_INF*tf.ones_like(self.task_logits[i])))
                        self.unweighted_entropy.append(tf.squeeze(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_[i], logits=self.task_pruned_logits[i])))) # mult by mean(y_[i]) puts unwarranted loss to 0
else:
self.fc_variables(layer_dims)
logits = self.fc_feedforward(x, self.weights, self.biases)
elif self.network_arch == 'FC-B':
input_dim = int(x.get_shape()[1])
layer_dims = [input_dim, 2000, 2000, self.total_classes]
self.fc_variables(layer_dims)
logits = self.fc_feedforward(x, self.weights, self.biases)
elif self.network_arch == 'CNN':
num_channels = int(x.get_shape()[-1])
self.image_size = int(x.get_shape()[1])
kernels = [3, 3, 3, 3, 3]
depth = [num_channels, 32, 32, 64, 64, 512]
self.conv_variables(kernels, depth)
logits = self.conv_feedforward(x, self.weights, self.biases, apply_dropout=True)
elif self.network_arch == 'VGG':
# VGG-16
logits = self.vgg_16_conv_feedforward(x)
elif 'RESNET-' in self.network_arch:
if self.network_arch == 'RESNET-S':
# Same resnet-18 as used in GEM paper
kernels = [3, 3, 3, 3, 3]
filters = [20, 20, 40, 80, 160]
strides = [1, 0, 2, 2, 2]
elif self.network_arch == 'RESNET-B':
# Standard ResNet-18
kernels = [7, 3, 3, 3, 3]
filters = [64, 64, 128, 256, 512]
strides = [2, 0, 2, 2, 2]
if self.imp_method == 'PNN':
self.task_logits = []
self.task_pruned_logits = []
self.unweighted_entropy = []
for i in range(self.num_tasks):
if i == 0:
self.task_logits.append(self.init_resent_column_progNN(x, kernels, filters, strides))
else:
self.task_logits.append(self.extensible_resnet_column_progNN(x, kernels, filters, strides, i))
self.task_pruned_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(self.task_logits[i])[0], 1]), self.task_logits[i], NEG_INF*tf.ones_like(self.task_logits[i])))
self.unweighted_entropy.append(tf.squeeze(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_[i], logits=self.task_pruned_logits[i]))))
elif self.imp_method == 'A-GEM' or self.imp_method == 'ER':
logits = self.resnet18_conv_feedforward(x, kernels, filters, strides)
self.task_pruned_logits = []
self.unweighted_entropy = []
for i in range(self.num_tasks):
self.task_pruned_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(logits)[0], 1]), logits, NEG_INF*tf.ones_like(logits)))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=self.task_pruned_logits[i])
adjusted_entropy = tf.reduce_sum(tf.cast(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(y_)[0], 1]), dtype=tf.float32) * y_, axis=1) * cross_entropy
self.unweighted_entropy.append(tf.reduce_sum(adjusted_entropy)) # We will average it later on
else:
logits = self.resnet18_conv_feedforward(x, kernels, filters, strides)
# Prune the predictions to only include the classes for which
# the training data is present
if (self.imp_method != 'PNN') and ((self.imp_method != 'A-GEM' and self.imp_method != 'ER') or 'FC-' in self.network_arch):
self.pruned_logits = tf.where(tf.tile(tf.equal(self.output_mask[None,:], 1.0), [tf.shape(logits)[0], 1]), logits, NEG_INF*tf.ones_like(logits))
# Create list of variables for storing different measures
# Note: This method has to be called before calculating fisher
# or any other importance measure
self.init_vars()
# Different entropy measures/ loss definitions
if (self.imp_method != 'PNN') and ((self.imp_method != 'A-GEM' and self.imp_method != 'ER') or 'FC-' in self.network_arch):
self.mse = 2.0*tf.nn.l2_loss(self.pruned_logits) # tf.nn.l2_loss computes sum(T**2)/ 2
self.weighted_entropy = tf.reduce_mean(tf.losses.softmax_cross_entropy(y_,
self.pruned_logits, self.sample_weights, reduction=tf.losses.Reduction.NONE))
self.unweighted_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_,
logits=self.pruned_logits))
# Create operations for loss and gradient calculation
self.loss_and_gradients(self.imp_method)
if self.imp_method != 'PNN':
# Store the current weights before doing a train step
self.get_current_weights()
# For GEM variants train ops will be defined later
if 'GEM' not in self.imp_method:
# Define the training operation here as Pathint ops depend on the train ops
self.train_op()
# Create operations to compute importance depending on the importance methods
if self.imp_method == 'EWC':
self.create_fisher_ops()
elif self.imp_method == 'M-EWC':
self.create_fisher_ops()
self.create_pathint_ops()
self.combined_fisher_pathint_ops()
elif self.imp_method == 'PI':
self.create_pathint_ops()
elif self.imp_method == 'RWALK':
self.create_fisher_ops()
self.create_pathint_ops()
elif self.imp_method == 'MAS':
self.create_hebbian_ops()
elif self.imp_method == 'A-GEM' or self.imp_method == 'S-GEM':
self.create_stochastic_gem_ops()
if self.imp_method != 'PNN':
# Create weight save and store ops
self.weights_store_ops()
# Summary operations for visualization
tf.summary.scalar("unweighted_entropy", self.unweighted_entropy)
for v in self.trainable_vars:
tf.summary.histogram(v.name.replace(":", "_"), v)
self.merged_summary = tf.summary.merge_all()
# Accuracy measure
if (self.imp_method == 'PNN') or ((self.imp_method == 'A-GEM' or self.imp_method == 'ER') and 'FC-' not in self.network_arch):
self.correct_predictions = []
self.accuracy = []
for i in range(self.num_tasks):
if self.imp_method == 'PNN':
self.correct_predictions.append(tf.equal(tf.argmax(self.task_pruned_logits[i], 1), tf.argmax(y_[i], 1)))
else:
self.correct_predictions.append(tf.equal(tf.argmax(self.task_pruned_logits[i], 1), tf.argmax(y_, 1)))
self.accuracy.append(tf.reduce_mean(tf.cast(self.correct_predictions[i], tf.float32)))
else:
self.correct_predictions = tf.equal(tf.argmax(self.pruned_logits, 1), tf.argmax(y_, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_predictions, tf.float32))
def loss_and_train_ops_for_attr_vector(self, x, y_):
"""
Loss and training operations for the training of joined embedding model
"""
        # Define the appropriate network
if self.network_arch == 'FC-S':
input_dim = int(x.get_shape()[1])
layer_dims = [input_dim, 256, 256, self.total_classes]
self.fc_variables(layer_dims)
logits = self.fc_feedforward(x, self.weights, self.biases)
elif self.network_arch == 'FC-B':
input_dim = int(x.get_shape()[1])
layer_dims = [input_dim, 2000, 2000, self.total_classes]
self.fc_variables(layer_dims)
logits = self.fc_feedforward(x, self.weights, self.biases)
elif self.network_arch == 'CNN':
num_channels = int(x.get_shape()[-1])
self.image_size = int(x.get_shape()[1])
kernels = [3, 3, 3, 3, 3]
depth = [num_channels, 32, 32, 64, 64, 512]
self.conv_variables(kernels, depth)
logits = self.conv_feedforward(x, self.weights, self.biases, apply_dropout=True)
elif self.network_arch == 'VGG':
# VGG-16
phi_x = self.vgg_16_conv_feedforward(x)
elif self.network_arch == 'RESNET-S':
# Standard ResNet-18
kernels = [3, 3, 3, 3, 3]
filters = [20, 20, 40, 80, 160]
strides = [1, 0, 2, 2, 2]
# Get the image features
phi_x = self.resnet18_conv_feedforward(x, kernels, filters, strides)
elif self.network_arch == 'RESNET-B':
# Standard ResNet-18
kernels = [7, 3, 3, 3, 3]
filters = [64, 64, 128, 256, 512]
strides = [2, 0, 2, 2, 2]
# Get the image features
phi_x = self.resnet18_conv_feedforward(x, kernels, filters, strides)
# Get the attributes embedding
attr_embed = self.get_attribute_embedding(self.class_attr) # Does not contain biases yet, Dimension: TOTAL_CLASSES x image_feature_dim
# Add the biases now
last_layer_biases = bias_variable([self.total_classes], name='attr_embed_b')
self.trainable_vars.append(last_layer_biases)
# Now that we have all the trainable variables, initialize the different book keeping variables
# Note: This method has to be called before calculating fisher
# or any other importance measure
self.init_vars()
# Compute the logits for the ZST case
zst_logits = tf.matmul(phi_x, tf.transpose(attr_embed)) + last_layer_biases
# Prune the predictions to only include the classes for which
# the training data is present
if self.imp_method == 'A-GEM':
pruned_zst_logits = []
self.unweighted_entropy = []
for i in range(self.num_tasks):
pruned_zst_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(zst_logits)[0], 1]), zst_logits, NEG_INF*tf.ones_like(zst_logits)))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=pruned_zst_logits[i])
adjusted_entropy = tf.reduce_sum(tf.cast(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(y_)[0], 1]), dtype=tf.float32) * y_, axis=1) * cross_entropy
self.unweighted_entropy.append(tf.reduce_sum(adjusted_entropy))
else:
pruned_zst_logits = tf.where(tf.tile(tf.equal(self.output_mask[None,:], 1.0),
[tf.shape(zst_logits)[0], 1]), zst_logits, NEG_INF*tf.ones_like(zst_logits))
self.unweighted_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=pruned_zst_logits))
self.mse = 2.0*tf.nn.l2_loss(pruned_zst_logits) # tf.nn.l2_loss computes sum(T**2)/ 2
# Create operations for loss and gradient calculation
self.loss_and_gradients(self.imp_method)
# Store the current weights before doing a train step
self.get_current_weights()
if 'GEM' not in self.imp_method:
self.train_op()
# Create operations to compute importance depending on the importance methods
if self.imp_method == 'EWC':
self.create_fisher_ops()
elif self.imp_method == 'M-EWC':
self.create_fisher_ops()
self.create_pathint_ops()
self.combined_fisher_pathint_ops()
elif self.imp_method == 'PI':
self.create_pathint_ops()
elif self.imp_method == 'RWALK':
self.create_fisher_ops()
self.create_pathint_ops()
elif self.imp_method == 'MAS':
self.create_hebbian_ops()
elif (self.imp_method == 'A-GEM') or (self.imp_method == 'S-GEM'):
self.create_stochastic_gem_ops()
# Create weight save and store ops
self.weights_store_ops()
# Summary operations for visualization
tf.summary.scalar("triplet_loss", self.unweighted_entropy)
for v in self.trainable_vars:
tf.summary.histogram(v.name.replace(":", "_"), v)
self.merged_summary = tf.summary.merge_all()
# Accuracy measure
if self.imp_method == 'A-GEM' and 'FC-' not in self.network_arch:
self.correct_predictions = []
self.accuracy = []
for i in range(self.num_tasks):
self.correct_predictions.append(tf.equal(tf.argmax(pruned_zst_logits[i], 1), tf.argmax(y_, 1)))
self.accuracy.append(tf.reduce_mean(tf.cast(self.correct_predictions[i], tf.float32)))
else:
self.correct_predictions = tf.equal(tf.argmax(pruned_zst_logits, 1), tf.argmax(y_, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_predictions, tf.float32))
def init_fc_column_progNN(self, layer_dims, h, apply_dropout=False):
"""
Defines the first column of Progressive NN - FC Networks
"""
self.trainable_vars = []
self.h_pnn = []
self.trainable_vars.append([])
self.h_pnn.append([])
self.h_pnn[0].append(h)
for i in range(len(layer_dims)-1):
w = weight_variable([layer_dims[i], layer_dims[i+1]], name='fc_w_%d_t0'%(i))
b = bias_variable([layer_dims[i+1]], name='fc_b_%d_t0'%(i))
self.trainable_vars[0].append(w)
self.trainable_vars[0].append(b)
if i == len(layer_dims) - 2:
# Last layer (logits) - don't apply the relu
h = create_fc_layer(h, w, b, apply_relu=False)
else:
h = create_fc_layer(h, w, b)
if apply_dropout:
h = tf.nn.dropout(h, 1)
self.h_pnn[0].append(h)
return h
def extensible_fc_column_progNN(self, layer_dims, h, task, apply_dropout=False):
"""
Define the subsequent columns of the progressive NN - FC Networks
"""
self.trainable_vars.append([])
self.h_pnn.append([])
self.h_pnn[task].append(h)
for i in range(len(layer_dims)-1):
w = weight_variable([layer_dims[i], layer_dims[i+1]], name='fc_w_%d_t%d'%(i, task))
b = bias_variable([layer_dims[i+1]], name='fc_b_%d_t%d'%(i, task))
self.trainable_vars[task].append(w)
self.trainable_vars[task].append(b)
preactivation = create_fc_layer(h, w, b, apply_relu=False)
for tt in range(task):
U_w = weight_variable([layer_dims[i], layer_dims[i+1]], name='fc_uw_%d_t%d_tt%d'%(i, task, tt))
U_b = bias_variable([layer_dims[i+1]], name='fc_ub_%d_t%d_tt%d'%(i, task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
preactivation += create_fc_layer(self.h_pnn[tt][i], U_w, U_b, apply_relu=False)
if i == len(layer_dims) - 2:
# Last layer (logits) - don't apply the relu
h = preactivation
else:
# layer < last layer, apply relu
h = tf.nn.relu(preactivation)
if apply_dropout:
                    h = tf.nn.dropout(h, 1)  # keep_prob of 1, matching the first PNN column
self.h_pnn[task].append(h)
return h
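    # Illustrative NumPy sketch of how one hidden layer of a later PNN column is
    # assembled in extensible_fc_column_progNN above: the column's own affine map
    # plus one lateral adapter per earlier (frozen) column. The helper and its
    # argument names are added for clarity only and are never called by the model.
    @staticmethod
    def _pnn_layer_sketch(h, w, b, prev_hs, lateral_ws, lateral_bs, is_logits=False):
        preactivation = np.dot(h, w) + b
        for h_prev, u_w, u_b in zip(prev_hs, lateral_ws, lateral_bs):
            # Lateral connection from the same depth of an earlier column.
            preactivation += np.dot(h_prev, u_w) + u_b
        # The logits layer is returned without the ReLU, as in the graph above.
        return preactivation if is_logits else np.maximum(preactivation, 0.0)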
def init_resent_column_progNN(self, x, kernels, filters, strides):
"""
Defines the first column of Progressive NN - ResNet-18
"""
self.trainable_vars = []
self.h_pnn = []
self.trainable_vars.append([])
self.h_pnn.append([])
self.h_pnn[0].append(x)
# Conv1
h = _conv(x, kernels[0], filters[0], strides[0], self.trainable_vars[0], name='conv_1_t0')
h = _bn(h, self.trainable_vars[0], self.train_phase[0], name='bn_1_t0')
h = tf.nn.relu(h)
self.h_pnn[0].append(h)
# Conv2_x
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv2_1_t0')
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv2_2_t0')
self.h_pnn[0].append(h)
# Conv3_x
h = _residual_block_first(h, filters[2], strides[2], self.trainable_vars[0], self.train_phase[0], name='conv3_1_t0', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv3_2_t0')
self.h_pnn[0].append(h)
# Conv4_x
h = _residual_block_first(h, filters[3], strides[3], self.trainable_vars[0], self.train_phase[0], name='conv4_1_t0', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv4_2_t0')
self.h_pnn[0].append(h)
# Conv5_x
h = _residual_block_first(h, filters[4], strides[4], self.trainable_vars[0], self.train_phase[0], name='conv5_1_t0', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv5_2_t0')
self.h_pnn[0].append(h)
# Apply average pooling
h = tf.reduce_mean(h, [1, 2])
if self.network_arch == 'RESNET-S':
logits = _fc(h, self.total_classes, self.trainable_vars[0], name='fc_1_t0', is_cifar=True)
else:
logits = _fc(h, self.total_classes, self.trainable_vars[0], name='fc_1_t0')
self.h_pnn[0].append(logits)
return logits
def extensible_resnet_column_progNN(self, x, kernels, filters, strides, task):
"""
Define the subsequent columns of the progressive NN - ResNet-18
"""
self.trainable_vars.append([])
self.h_pnn.append([])
self.h_pnn[task].append(x)
# Conv1
h = _conv(x, kernels[0], filters[0], strides[0], self.trainable_vars[task], name='conv_1_t%d'%(task))
h = _bn(h, self.trainable_vars[task], self.train_phase[task], name='bn_1_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][0].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_1_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_1_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][0], U_w, U_b, apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Conv2_x
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], name='conv2_1_t%d'%(task))
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], apply_relu=False, name='conv2_2_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][1].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_2_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_2_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][1], U_w, U_b, apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Conv3_x
h = _residual_block_first(h, filters[2], strides[2], self.trainable_vars[task], self.train_phase[task], name='conv3_1_t%d'%(task), is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], apply_relu=False, name='conv3_2_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][2].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_3_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_3_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][2], U_w, U_b, stride=strides[2], apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Conv4_x
h = _residual_block_first(h, filters[3], strides[3], self.trainable_vars[task], self.train_phase[task], name='conv4_1_t%d'%(task), is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], apply_relu=False, name='conv4_2_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][3].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_4_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_4_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][3], U_w, U_b, stride=strides[3], apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Conv5_x
h = _residual_block_first(h, filters[4], strides[4], self.trainable_vars[task], self.train_phase[task], name='conv5_1_t%d'%(task), is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], apply_relu=False, name='conv5_2_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][4].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_5_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_5_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][4], U_w, U_b, stride=strides[4], apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Apply average pooling
h = tf.reduce_mean(h, [1, 2])
if self.network_arch == 'RESNET-S':
logits = _fc(h, self.total_classes, self.trainable_vars[task], name='fc_1_t%d'%(task), is_cifar=True)
else:
logits = _fc(h, self.total_classes, self.trainable_vars[task], name='fc_1_t%d'%(task))
for tt in range(task):
h_tt = tf.reduce_mean(self.h_pnn[tt][5], [1, 2])
U_w = weight_variable([h_tt.get_shape().as_list()[1], self.total_classes], name='fc_uw_1_t%d_tt%d'%(task, tt))
U_b = bias_variable([self.total_classes], name='fc_ub_1_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
logits += create_fc_layer(h_tt, U_w, U_b, apply_relu=False)
self.h_pnn[task].append(logits)
return logits
def fc_variables(self, layer_dims):
"""
Defines variables for a 3-layer fc network
Args:
Returns:
"""
self.weights = []
self.biases = []
self.trainable_vars = []
for i in range(len(layer_dims)-1):
w = weight_variable([layer_dims[i], layer_dims[i+1]], name='fc_%d'%(i))
b = bias_variable([layer_dims[i+1]], name='fc_%d'%(i))
self.weights.append(w)
self.biases.append(b)
self.trainable_vars.append(w)
self.trainable_vars.append(b)
def fc_feedforward(self, h, weights, biases, apply_dropout=True):
"""
Forward pass through a fc network
Args:
h Input image (tensor)
weights List of weights for a fc network
biases List of biases for a fc network
            apply_dropout Whether to apply dropout (True/ False)
Returns:
Logits of a fc network
"""
if apply_dropout:
h = tf.nn.dropout(h, 1) # Apply dropout on Input?
for (w, b) in list(zip(weights, biases))[:-1]:
h = create_fc_layer(h, w, b)
if apply_dropout:
h = tf.nn.dropout(h, keep_prob=0.75) # Apply dropout on hidden layers?
# Store image features
self.features = h
self.image_feature_dim = h.get_shape().as_list()[-1]
return create_fc_layer(h, weights[-1], biases[-1], apply_relu=False)
def conv_variables(self, kernel, depth):
"""
Defines variables of a 5xconv-1xFC convolutional network
Args:
Returns:
"""
self.weights = []
self.biases = []
self.trainable_vars = []
div_factor = 1
for i in range(len(kernel)):
w = weight_variable([kernel[i], kernel[i], depth[i], depth[i+1]], name='conv_%d'%(i))
b = bias_variable([depth[i+1]], name='conv_%d'%(i))
self.weights.append(w)
self.biases.append(b)
self.trainable_vars.append(w)
self.trainable_vars.append(b)
# Since we maxpool after every two conv layers
if ((i+1) % 2 == 0):
div_factor *= 2
flat_units = (self.image_size // div_factor) * (self.image_size // div_factor) * depth[-1]
w = weight_variable([flat_units, self.total_classes], name='fc_%d'%(i))
b = bias_variable([self.total_classes], name='fc_%d'%(i))
self.weights.append(w)
self.biases.append(b)
self.trainable_vars.append(w)
self.trainable_vars.append(b)
def conv_feedforward(self, h, weights, biases, apply_dropout=True):
"""
Forward pass through a convolutional network
Args:
h Input image (tensor)
weights List of weights for a conv network
biases List of biases for a conv network
            apply_dropout Whether to apply dropout (True/ False)
Returns:
Logits of a conv network
"""
for i, (w, b) in enumerate(list(zip(weights, biases))[:-1]):
# Apply conv operation till the second last layer, which is a FC layer
h = create_conv_layer(h, w, b)
if ((i+1) % 2 == 0):
# Apply max pool after every two conv layers
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Apply dropout
if apply_dropout:
h = tf.nn.dropout(h, self.keep_prob)
# Construct FC layers
shape = h.get_shape().as_list()
h = tf.reshape(h, [-1, shape[1] * shape[2] * shape[3]])
# Store image features
self.features = h
self.image_feature_dim = h.get_shape().as_list()[-1]
return create_fc_layer(h, weights[-1], biases[-1], apply_relu=False)
def vgg_16_conv_feedforward(self, h):
"""
Forward pass through a VGG 16 network
Return:
Logits of a VGG 16 network
"""
self.trainable_vars = []
# Conv1
h = vgg_conv_layer(h, 3, 64, 1, self.trainable_vars, name='conv1_1')
h = vgg_conv_layer(h, 3, 64, 1, self.trainable_vars, name='conv1_2')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
# Conv2
h = vgg_conv_layer(h, 3, 128, 1, self.trainable_vars, name='conv2_1')
h = vgg_conv_layer(h, 3, 128, 1, self.trainable_vars, name='conv2_2')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# Conv3
h = vgg_conv_layer(h, 3, 256, 1, self.trainable_vars, name='conv3_1')
h = vgg_conv_layer(h, 3, 256, 1, self.trainable_vars, name='conv3_2')
h = vgg_conv_layer(h, 3, 256, 1, self.trainable_vars, name='conv3_3')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')
# Conv4
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv4_1')
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv4_2')
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv4_3')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4')
# Conv5
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv5_1')
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv5_2')
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv5_3')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool5')
# FC layers
shape = h.get_shape().as_list()
h = tf.reshape(h, [-1, shape[1] * shape[2] * shape[3]])
# fc6
h = vgg_fc_layer(h, 4096, self.trainable_vars, apply_relu=True, name='fc6')
# fc7
h = vgg_fc_layer(h, 4096, self.trainable_vars, apply_relu=True, name='fc7')
# Store image features
self.features = h
self.image_feature_dim = h.get_shape().as_list()[-1]
# fc8
if self.class_attr is not None:
# Return the image features
return h
else:
logits = vgg_fc_layer(h, self.total_classes, self.trainable_vars, apply_relu=False, name='fc8')
return logits
def resnet18_conv_feedforward(self, h, kernels, filters, strides):
"""
Forward pass through a ResNet-18 network
Returns:
Logits of a resnet-18 conv network
"""
self.trainable_vars = []
# Conv1
h = _conv(h, kernels[0], filters[0], strides[0], self.trainable_vars, name='conv_1')
h = _bn(h, self.trainable_vars, self.train_phase, name='bn_1')
h = tf.nn.relu(h)
# Conv2_x
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv2_1')
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv2_2')
# Conv3_x
h = _residual_block_first(h, filters[2], strides[2], self.trainable_vars, self.train_phase, name='conv3_1', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv3_2')
# Conv4_x
h = _residual_block_first(h, filters[3], strides[3], self.trainable_vars, self.train_phase, name='conv4_1', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv4_2')
# Conv5_x
h = _residual_block_first(h, filters[4], strides[4], self.trainable_vars, self.train_phase, name='conv5_1', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv5_2')
# Apply average pooling
h = tf.reduce_mean(h, [1, 2])
# Store the feature mappings
self.features = h
self.image_feature_dim = h.get_shape().as_list()[-1]
if self.class_attr is not None:
# Return the image features
return h
else:
if self.network_arch == 'RESNET-S':
logits = _fc(h, self.total_classes, self.trainable_vars, name='fc_1', is_cifar=True)
else:
logits = _fc(h, self.total_classes, self.trainable_vars, name='fc_1')
return logits
def get_attribute_embedding(self, attr):
"""
Get attribute embedding using a simple FC network
Returns:
Embedding vector of k x ATTR_DIMS
"""
w = weight_variable([self.attr_dims, self.image_feature_dim], name='attr_embed_w')
self.trainable_vars.append(w)
# Return the inner product of attribute matrix and weight vector.
return tf.matmul(attr, w) # Dimension should be TOTAL_CLASSES x image_feature_dim
def loss_and_gradients(self, imp_method):
"""
Defines task based and surrogate losses and their
gradients
Args:
Returns:
"""
reg = 0.0
if imp_method == 'VAN' or imp_method == 'PNN' or imp_method == 'ER' or 'GEM' in imp_method:
pass
elif imp_method == 'EWC' or imp_method == 'M-EWC':
reg = tf.add_n([tf.reduce_sum(tf.square(w - w_star) * f) for w, w_star,
f in zip(self.trainable_vars, self.star_vars, self.normalized_fisher_at_minima_vars)])
elif imp_method == 'PI':
reg = tf.add_n([tf.reduce_sum(tf.square(w - w_star) * f) for w, w_star,
f in zip(self.trainable_vars, self.star_vars, self.big_omega_vars)])
elif imp_method == 'MAS':
reg = tf.add_n([tf.reduce_sum(tf.square(w - w_star) * f) for w, w_star,
f in zip(self.trainable_vars, self.star_vars, self.hebbian_score_vars)])
elif imp_method == 'RWALK':
reg = tf.add_n([tf.reduce_sum(tf.square(w - w_star) * (f + scr)) for w, w_star,
f, scr in zip(self.trainable_vars, self.star_vars, self.normalized_fisher_at_minima_vars,
self.normalized_score_vars)])
"""
# ***** DON't USE THIS WITH MULTI-HEAD SETTING SINCE THIS WILL UPDATE ALL THE WEIGHTS *****
# If CNN arch, then use the weight decay
if self.is_ATT_DATASET:
self.unweighted_entropy += tf.add_n([0.0005 * tf.nn.l2_loss(v) for v in self.trainable_vars if 'weights' in v.name or 'kernel' in v.name])
"""
if imp_method == 'PNN':
# Compute the gradients of regularized loss
self.reg_gradients_vars = []
for i in range(self.num_tasks):
self.reg_gradients_vars.append([])
self.reg_gradients_vars[i] = self.opt.compute_gradients(self.unweighted_entropy[i], var_list=self.trainable_vars[i])
elif imp_method != 'A-GEM': # For A-GEM we will define the losses and gradients later on
if imp_method == 'ER' and 'FC-' not in self.network_arch:
self.reg_loss = tf.add_n([self.unweighted_entropy[i] for i in range(self.num_tasks)])/ self.mem_batch_size
else:
# Regularized training loss
self.reg_loss = tf.squeeze(self.unweighted_entropy + self.synap_stgth * reg)
# Compute the gradients of the vanilla loss
self.vanilla_gradients_vars = self.opt.compute_gradients(self.unweighted_entropy,
var_list=self.trainable_vars)
# Compute the gradients of regularized loss
self.reg_gradients_vars = self.opt.compute_gradients(self.reg_loss,
var_list=self.trainable_vars)
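    # Illustrative NumPy sketch of the surrogate regularizer assembled in
    # loss_and_gradients above for EWC/PI/MAS/RWALK: a quadratic penalty on the
    # distance from the previous-task solution, weighted per parameter by an
    # importance estimate. Added for clarity only and never called; 'importances'
    # stands for the normalized Fisher, big-omega or hebbian scores, and 'lam'
    # stands for the synaptic strength constant.
    @staticmethod
    def _quadratic_penalty_sketch(params, star_params, importances, lam):
        reg = 0.0
        for w, w_star, imp in zip(params, star_params, importances):
            reg += np.sum(imp * np.square(w - w_star))
        return lam * reg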
def train_op(self):
"""
Defines the training operation (a single step during training)
Args:
Returns:
"""
if self.imp_method == 'VAN' or self.imp_method == 'ER':
# Define training operation
self.train = self.opt.apply_gradients(self.reg_gradients_vars, global_step=self.global_step)
elif self.imp_method == 'PNN':
# Define training operation
self.train = [self.opt.apply_gradients(self.reg_gradients_vars[i]) for i in range(self.num_tasks)]
elif self.imp_method == 'FTR_EXT':
# Define a training operation for the first and subsequent tasks
self.train = self.opt.apply_gradients(self.reg_gradients_vars)
self.train_classifier = self.opt.apply_gradients(self.reg_gradients_vars[-2:])
else:
# Get the value of old weights first
with tf.control_dependencies([self.weights_old_ops_grouped]):
# Define a training operation
self.train = self.opt.apply_gradients(self.reg_gradients_vars, global_step=self.global_step)
def init_vars(self):
"""
Defines different variables that will be used for the
weight consolidation
Args:
Returns:
"""
if self.imp_method == 'PNN':
return
for v in range(len(self.trainable_vars)):
# List of variables for weight updates
self.weights_old.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.weights_delta_old_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.star_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False,
name=self.trainable_vars[v].name.rsplit(':')[0]+'_star'))
# List of variables for pathint method
self.small_omega_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.big_omega_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.big_omega_riemann_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
# List of variables to store fisher information
self.fisher_diagonal_at_minima.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.normalized_fisher_at_minima_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False, dtype=tf.float32))
self.tmp_fisher_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.running_fisher_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.score_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
# New variables for conv setting for fisher and score normalization
self.max_fisher_vars.append(tf.Variable(tf.zeros(1), dtype=tf.float32, trainable=False))
self.min_fisher_vars.append(tf.Variable(tf.zeros(1), dtype=tf.float32, trainable=False))
self.max_score_vars.append(tf.Variable(tf.zeros(1), dtype=tf.float32, trainable=False))
self.min_score_vars.append(tf.Variable(tf.zeros(1), dtype=tf.float32, trainable=False))
self.normalized_score_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
if self.imp_method == 'MAS':
# List of variables to store hebbian information
self.hebbian_score_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
elif self.imp_method == 'A-GEM' or self.imp_method == 'S-GEM':
self.ref_grads.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.projected_gradients_list.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
def get_current_weights(self):
"""
Get the values of current weights
Note: These weights are different from star_vars as those
store the weights after training for the last task.
Args:
Returns:
"""
weights_old_ops = []
weights_delta_old_ops = []
for v in range(len(self.trainable_vars)):
weights_old_ops.append(tf.assign(self.weights_old[v], self.trainable_vars[v]))
weights_delta_old_ops.append(tf.assign(self.weights_delta_old_vars[v], self.trainable_vars[v]))
self.weights_old_ops_grouped = tf.group(*weights_old_ops)
self.weights_delta_old_grouped = tf.group(*weights_delta_old_ops)
def weights_store_ops(self):
"""
Defines weight restoration operations
Args:
Returns:
"""
restore_weights_ops = []
set_star_vars_ops = []
for v in range(len(self.trainable_vars)):
restore_weights_ops.append(tf.assign(self.trainable_vars[v], self.star_vars[v]))
set_star_vars_ops.append(tf.assign(self.star_vars[v], self.trainable_vars[v]))
self.restore_weights = tf.group(*restore_weights_ops)
self.set_star_vars = tf.group(*set_star_vars_ops)
def reset_optimizer_ops(self):
"""
Defines operations to reset the optimizer
Args:
Returns:
"""
# Set the operation for resetting the optimizer
self.optimizer_slots = [self.opt.get_slot(var, name) for name in self.opt.get_slot_names()\
for var in tf.global_variables() if self.opt.get_slot(var, name) is not None]
self.slot_names = self.opt.get_slot_names()
self.opt_init_op = tf.variables_initializer(self.optimizer_slots)
def create_pathint_ops(self):
"""
Defines operations for path integral-based importance
Args:
Returns:
"""
reset_small_omega_ops = []
update_small_omega_ops = []
update_big_omega_ops = []
update_big_omega_riemann_ops = []
for v in range(len(self.trainable_vars)):
# Make sure that the variables are updated before calculating delta(theta)
with tf.control_dependencies([self.train]):
update_small_omega_ops.append(tf.assign_add(self.small_omega_vars[v],
-(self.vanilla_gradients_vars[v][0] * (self.trainable_vars[v] - self.weights_old[v]))))
# Ops to reset the small omega
reset_small_omega_ops.append(tf.assign(self.small_omega_vars[v], self.small_omega_vars[v]*0.0))
if self.imp_method == 'PI':
                # Update the big omegas at the end of the task using the Euclidean distance
update_big_omega_ops.append(tf.assign_add(self.big_omega_vars[v],
tf.nn.relu(tf.div(self.small_omega_vars[v], (PARAM_XI_STEP + tf.square(self.trainable_vars[v] - self.star_vars[v]))))))
elif self.imp_method == 'RWALK':
# Update the big omegas after small intervals using distance in riemannian manifold (KL-divergence)
update_big_omega_riemann_ops.append(tf.assign_add(self.big_omega_riemann_vars[v],
tf.nn.relu(tf.div(self.small_omega_vars[v],
(PARAM_XI_STEP + self.running_fisher_vars[v] * tf.square(self.trainable_vars[v] - self.weights_delta_old_vars[v]))))))
self.update_small_omega = tf.group(*update_small_omega_ops)
self.reset_small_omega = tf.group(*reset_small_omega_ops)
if self.imp_method == 'PI':
self.update_big_omega = tf.group(*update_big_omega_ops)
elif self.imp_method == 'RWALK':
self.update_big_omega_riemann = tf.group(*update_big_omega_riemann_ops)
self.big_omega_riemann_reset = [tf.assign(tensor, tf.zeros_like(tensor)) for tensor in self.big_omega_riemann_vars]
if self.imp_method == 'RWALK':
# For the first task, scale the scores so that division does not have an effect
self.scale_score = [tf.assign(s, s*2.0) for s in self.big_omega_riemann_vars]
            # To reduce the rigidity, the importance scores are averaged after each task
self.update_score = [tf.assign_add(scr, tf.div(tf.add(scr, riemm_omega), 2.0))
for scr, riemm_omega in zip(self.score_vars, self.big_omega_riemann_vars)]
# Get the min and max in each layer of the scores
self.get_max_score_vars = [tf.assign(var, tf.expand_dims(tf.squeeze(tf.reduce_max(scr, keepdims=True)),
axis=0)) for var, scr in zip(self.max_score_vars, self.score_vars)]
self.get_min_score_vars = [tf.assign(var, tf.expand_dims(tf.squeeze(tf.reduce_min(scr, keepdims=True)),
axis=0)) for var, scr in zip(self.min_score_vars, self.score_vars)]
self.max_score = tf.reduce_max(tf.convert_to_tensor(self.max_score_vars))
self.min_score = tf.reduce_min(tf.convert_to_tensor(self.min_score_vars))
with tf.control_dependencies([self.max_score, self.min_score]):
self.normalize_scores = [tf.assign(tgt, (var - self.min_score)/ (self.max_score - self.min_score + EPSILON))
for tgt, var in zip(self.normalized_score_vars, self.score_vars)]
# Sparsify all the layers except last layer
sparsify_score_ops = []
for v in range(len(self.normalized_score_vars) - 2):
sparsify_score_ops.append(tf.assign(self.normalized_score_vars[v],
tf.nn.dropout(self.normalized_score_vars[v], self.keep_prob)))
self.sparsify_scores = tf.group(*sparsify_score_ops)
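    # Illustrative NumPy sketches of the two path-integral bookkeeping steps defined
    # in create_pathint_ops above, assuming NumPy array inputs. They are added for
    # clarity and never called; 'xi' stands for the PARAM_XI_STEP damping constant.
    @staticmethod
    def _pi_accumulate_sketch(small_omega, grad, theta_new, theta_old):
        # Every training step: omega_small += -g * (theta_t - theta_{t-1}).
        return small_omega + (-grad * (theta_new - theta_old))
    @staticmethod
    def _pi_consolidate_sketch(big_omega, small_omega, theta, theta_star, xi):
        # End of a task: omega_big += relu(omega_small / (xi + (theta - theta*)^2)).
        return big_omega + np.maximum(small_omega / (xi + np.square(theta - theta_star)), 0.0)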
def create_fisher_ops(self):
"""
Defines the operations to compute online update of Fisher
Args:
Returns:
"""
ders = tf.gradients(self.unweighted_entropy, self.trainable_vars)
fisher_ema_at_step_ops = []
fisher_accumulate_at_step_ops = []
# ops for running fisher
self.set_tmp_fisher = [tf.assign_add(f, tf.square(d)) for f, d in zip(self.tmp_fisher_vars, ders)]
# Initialize the running fisher to non-zero value
self.set_initial_running_fisher = [tf.assign(r_f, s_f) for r_f, s_f in zip(self.running_fisher_vars,
self.tmp_fisher_vars)]
self.set_running_fisher = [tf.assign(f, (1 - self.fisher_ema_decay) * f + (1.0/ self.fisher_update_after) *
self.fisher_ema_decay * tmp) for f, tmp in zip(self.running_fisher_vars, self.tmp_fisher_vars)]
self.get_fisher_at_minima = [tf.assign(var, f) for var, f in zip(self.fisher_diagonal_at_minima,
self.running_fisher_vars)]
self.reset_tmp_fisher = [tf.assign(tensor, tf.zeros_like(tensor)) for tensor in self.tmp_fisher_vars]
# Get the min and max in each layer of the Fisher
self.get_max_fisher_vars = [tf.assign(var, tf.expand_dims(tf.squeeze(tf.reduce_max(scr, keepdims=True)), axis=0))
for var, scr in zip(self.max_fisher_vars, self.fisher_diagonal_at_minima)]
self.get_min_fisher_vars = [tf.assign(var, tf.expand_dims(tf.squeeze(tf.reduce_min(scr, keepdims=True)), axis=0))
for var, scr in zip(self.min_fisher_vars, self.fisher_diagonal_at_minima)]
self.max_fisher = tf.reduce_max(tf.convert_to_tensor(self.max_fisher_vars))
self.min_fisher = tf.reduce_min(tf.convert_to_tensor(self.min_fisher_vars))
with tf.control_dependencies([self.max_fisher, self.min_fisher]):
self.normalize_fisher_at_minima = [tf.assign(tgt,
(var - self.min_fisher)/ (self.max_fisher - self.min_fisher + EPSILON))
for tgt, var in zip(self.normalized_fisher_at_minima_vars, self.fisher_diagonal_at_minima)]
self.clear_attr_embed_reg = tf.assign(self.normalized_fisher_at_minima_vars[-2], tf.zeros_like(self.normalized_fisher_at_minima_vars[-2]))
# Sparsify all the layers except last layer
sparsify_fisher_ops = []
for v in range(len(self.normalized_fisher_at_minima_vars) - 2):
sparsify_fisher_ops.append(tf.assign(self.normalized_fisher_at_minima_vars[v],
tf.nn.dropout(self.normalized_fisher_at_minima_vars[v], self.keep_prob)))
self.sparsify_fisher = tf.group(*sparsify_fisher_ops)
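    # Illustrative NumPy sketch of the running-Fisher update defined in
    # create_fisher_ops above: squared gradients accumulated in a temporary buffer
    # are folded into the running estimate with an exponential moving average, and
    # the converged estimate is min-max normalized across layers before being used
    # as a regularization weight. Helpers added for clarity only; never called.
    @staticmethod
    def _running_fisher_sketch(running_fisher, tmp_fisher, ema_decay, update_after):
        return (1.0 - ema_decay) * running_fisher + (ema_decay / update_after) * tmp_fisher
    @staticmethod
    def _minmax_normalize_sketch(fisher_per_layer, eps=1e-10):
        # 'eps' stands in for the EPSILON constant used by the graph ops above.
        f_max = max(float(np.max(f)) for f in fisher_per_layer)
        f_min = min(float(np.min(f)) for f in fisher_per_layer)
        return [(f - f_min) / (f_max - f_min + eps) for f in fisher_per_layer]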
def combined_fisher_pathint_ops(self):
"""
        Define the operations to refine the Fisher information based on parameter convergence
Args:
Returns:
"""
#self.refine_fisher_at_minima = [tf.assign(f, f*(1.0/(s+1e-12))) for f, s in zip(self.fisher_diagonal_at_minima, self.small_omega_vars)]
self.refine_fisher_at_minima = [tf.assign(f, f*tf.exp(-100.0*s)) for f, s in zip(self.fisher_diagonal_at_minima, self.small_omega_vars)]
def create_hebbian_ops(self):
"""
Define operations for hebbian measure of importance (MAS)
"""
# Compute the gradients of mse loss
self.mse_gradients = tf.gradients(self.mse, self.trainable_vars)
#with tf.control_dependencies([self.mse_gradients]):
# Keep on adding gradients to the omega
self.accumulate_hebbian_scores = [tf.assign_add(omega, tf.abs(grad)) for omega, grad in zip(self.hebbian_score_vars, self.mse_gradients)]
# Average across the total images
self.average_hebbian_scores = [tf.assign(omega, omega*(1.0/self.train_samples)) for omega in self.hebbian_score_vars]
# Reset the hebbian importance variables
self.reset_hebbian_scores = [tf.assign(omega, tf.zeros_like(omega)) for omega in self.hebbian_score_vars]
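    # Illustrative NumPy sketch of the MAS importance computed in create_hebbian_ops
    # above: for each parameter, accumulate the absolute gradient of the squared L2
    # norm of the network output over the training set and average by the number of
    # samples. Helper added for clarity only; never called by the model.
    @staticmethod
    def _mas_importance_sketch(grads_per_batch, num_samples):
        omega = np.zeros_like(grads_per_batch[0])
        for g in grads_per_batch:
            omega += np.abs(g)
        return omega / float(num_samples)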
def create_stochastic_gem_ops(self):
"""
Define operations for Stochastic GEM
"""
if 'FC-' in self.network_arch or self.imp_method == 'S-GEM':
self.agem_loss = self.unweighted_entropy
else:
self.agem_loss = tf.add_n([self.unweighted_entropy[i] for i in range(self.num_tasks)])/ self.mem_batch_size
ref_grads = tf.gradients(self.agem_loss, self.trainable_vars)
# Reference gradient for previous tasks
self.store_ref_grads = [tf.assign(ref, grad) for ref, grad in zip(self.ref_grads, ref_grads)]
flat_ref_grads = tf.concat([tf.reshape(grad, [-1]) for grad in self.ref_grads], 0)
        # Gradient on the current task
task_grads = tf.gradients(self.agem_loss, self.trainable_vars)
flat_task_grads = tf.concat([tf.reshape(grad, [-1]) for grad in task_grads], 0)
with tf.control_dependencies([flat_task_grads]):
dotp = tf.reduce_sum(tf.multiply(flat_task_grads, flat_ref_grads))
ref_mag = tf.reduce_sum(tf.multiply(flat_ref_grads, flat_ref_grads))
proj = flat_task_grads - ((dotp/ ref_mag) * flat_ref_grads)
self.reset_violation_count = self.violation_count.assign(0)
def increment_violation_count():
with tf.control_dependencies([tf.assign_add(self.violation_count, 1)]):
return tf.identity(self.violation_count)
self.violation_count = tf.cond(tf.greater_equal(dotp, 0), lambda: tf.identity(self.violation_count), increment_violation_count)
projected_gradients = tf.cond(tf.greater_equal(dotp, 0), lambda: tf.identity(flat_task_grads), lambda: tf.identity(proj))
# Convert the flat projected gradient vector into a list
offset = 0
store_proj_grad_ops = []
for v in self.projected_gradients_list:
shape = v.get_shape()
v_params = 1
for dim in shape:
v_params *= dim.value
store_proj_grad_ops.append(tf.assign(v, tf.reshape(projected_gradients[offset:offset+v_params], shape)))
offset += v_params
self.store_proj_grads = tf.group(*store_proj_grad_ops)
# Define training operations for the tasks > 1
with tf.control_dependencies([self.store_proj_grads]):
self.train_subseq_tasks = self.opt.apply_gradients(zip(self.projected_gradients_list, self.trainable_vars), global_step=self.global_step)
# Define training operations for the first task
self.first_task_gradients_vars = self.opt.compute_gradients(self.agem_loss, var_list=self.trainable_vars)
self.train_first_task = self.opt.apply_gradients(self.first_task_gradients_vars, global_step=self.global_step)
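    # Illustrative NumPy sketch of the A-GEM gradient projection assembled above
    # with TF ops. 'g' and 'g_ref' stand for the flattened current-task and
    # episodic-memory gradients; the helper is for clarity only and never called.
    @staticmethod
    def _agem_project_sketch(g, g_ref):
        dotp = float(np.dot(g, g_ref))
        if dotp >= 0:
            # The proposed step does not interfere with the memory gradient.
            return g
        # Otherwise remove the component of g that points against g_ref.
        return g - (dotp / float(np.dot(g_ref, g_ref))) * g_ref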
#################################################################################
#### External APIs of the class. These will be called/ exposed externally #######
#################################################################################
def reset_optimizer(self, sess):
"""
Resets the optimizer state
Args:
sess TF session
Returns:
"""
# Call the reset optimizer op
sess.run(self.opt_init_op)
def set_active_outputs(self, sess, labels):
"""
Set the mask for the labels seen so far
Args:
sess TF session
labels Mask labels
Returns:
"""
new_mask = np.zeros(self.total_classes)
new_mask[labels] = 1.0
"""
for l in labels:
new_mask[l] = 1.0
"""
sess.run(self.output_mask.assign(new_mask))
def init_updates(self, sess):
"""
Initialization updates
Args:
sess TF session
Returns:
"""
# Set the star values to the initial weights, so that we can calculate
# big_omegas reliably
if self.imp_method != 'PNN':
sess.run(self.set_star_vars)
def task_updates(self, sess, task, train_x, train_labels, num_classes_per_task=10, class_attr=None, online_cross_val=False):
"""
Updates different variables when a task is completed
Args:
sess TF session
task Task ID
train_x Training images for the task
train_labels Labels in the task
class_attr Class attributes (only needed for ZST transfer)
Returns:
"""
if self.imp_method == 'VAN' or self.imp_method == 'PNN':
# We'll store the current parameters at the end of this function
pass
elif self.imp_method == 'EWC':
# Get the fisher at the end of a task
sess.run(self.get_fisher_at_minima)
# Normalize the fisher
sess.run([self.get_max_fisher_vars, self.get_min_fisher_vars])
sess.run([self.min_fisher, self.max_fisher, self.normalize_fisher_at_minima])
# Don't regularize over the attribute-embedding vectors
#sess.run(self.clear_attr_embed_reg)
# Reset the tmp fisher vars
sess.run(self.reset_tmp_fisher)
elif self.imp_method == 'M-EWC':
# Get the fisher at the end of a task
sess.run(self.get_fisher_at_minima)
# Refine Fisher based on the convergence info
sess.run(self.refine_fisher_at_minima)
# Normalize the fisher
sess.run([self.get_max_fisher_vars, self.get_min_fisher_vars])
sess.run([self.min_fisher, self.max_fisher, self.normalize_fisher_at_minima])
# Reset the tmp fisher vars
sess.run(self.reset_tmp_fisher)
# Reset the small_omega_vars
sess.run(self.reset_small_omega)
elif self.imp_method == 'PI':
# Update big omega variables
sess.run(self.update_big_omega)
# Reset the small_omega_vars because big_omega_vars are updated before it
sess.run(self.reset_small_omega)
elif self.imp_method == 'RWALK':
if task == 0:
# If first task then scale by a factor of 2, so that subsequent averaging does not hurt
sess.run(self.scale_score)
# Get the updated importance score
sess.run(self.update_score)
# Normalize the scores
sess.run([self.get_max_score_vars, self.get_min_score_vars])
sess.run([self.min_score, self.max_score, self.normalize_scores])
# Sparsify scores
"""
# TODO: Tmp remove this?
kp = 0.8 + (task*0.5)
if (kp > 1):
kp = 1.0
"""
#sess.run(self.sparsify_scores, feed_dict={self.keep_prob: kp})
# Get the fisher at the end of a task
sess.run(self.get_fisher_at_minima)
# Normalize fisher
sess.run([self.get_max_fisher_vars, self.get_min_fisher_vars])
sess.run([self.min_fisher, self.max_fisher, self.normalize_fisher_at_minima])
# Sparsify fisher
#sess.run(self.sparsify_fisher, feed_dict={self.keep_prob: kp})
# Store the weights
sess.run(self.weights_delta_old_grouped)
# Reset the small_omega_vars because big_omega_vars are updated before it
sess.run(self.reset_small_omega)
# Reset the big_omega_riemann because importance score is stored in the scores array
sess.run(self.big_omega_riemann_reset)
# Reset the tmp fisher vars
sess.run(self.reset_tmp_fisher)
elif self.imp_method == 'MAS':
# zero out any previous values
sess.run(self.reset_hebbian_scores)
if self.class_attr is not None:
# Define mask based on the class attributes
masked_class_attrs = np.zeros_like(class_attr)
masked_class_attrs[train_labels] = class_attr[train_labels]
# Logits mask
logit_mask = np.zeros(self.total_classes)
logit_mask[train_labels] = 1.0
# Loop over the entire training dataset to compute the parameter importance
batch_size = 10
num_samples = train_x.shape[0]
for iters in range(num_samples// batch_size):
offset = iters * batch_size
if self.class_attr is not None:
sess.run(self.accumulate_hebbian_scores, feed_dict={self.x: train_x[offset:offset+batch_size], self.keep_prob: 1.0,
self.class_attr: masked_class_attrs, self.output_mask: logit_mask, self.train_phase: False})
else:
sess.run(self.accumulate_hebbian_scores, feed_dict={self.x: train_x[offset:offset+batch_size], self.keep_prob: 1.0,
self.output_mask: logit_mask, self.train_phase: False})
# Average the hebbian scores across the training examples
sess.run(self.average_hebbian_scores, feed_dict={self.train_samples: num_samples})
# Store current weights
self.init_updates(sess)
def restore(self, sess):
"""
Restore the weights from the star variables
Args:
sess TF session
Returns:
"""
sess.run(self.restore_weights) | 66,852 | 48.520741 | 223 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/continual_learning_algorithms/model/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .model import Model
| 219 | 30.428571 | 70 | py |
snare | snare-master/train.py | import os
from pathlib import Path
import hydra
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
import numpy as np
import random
import torch
import models
from data.dataset import CLIPGraspingDataset
from torch.utils.data import DataLoader
@hydra.main(config_path="cfgs", config_name="train")
def main(cfg):
# set random seeds
seed = cfg['train']['random_seed']
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
hydra_dir = Path(os.getcwd())
checkpoint_path = hydra_dir / 'checkpoints'
last_checkpoint_path = os.path.join(checkpoint_path, 'last.ckpt')
last_checkpoint = last_checkpoint_path \
if os.path.exists(last_checkpoint_path) and cfg['train']['load_from_last_ckpt'] else None
checkpoint_callback = ModelCheckpoint(
monitor=cfg['wandb']['saver']['monitor'],
dirpath=checkpoint_path,
filename='{epoch:04d}-{val_acc:.5f}',
save_top_k=1,
save_last=True,
)
trainer = Trainer(
gpus=[0],
fast_dev_run=cfg['debug'],
checkpoint_callback=checkpoint_callback,
max_epochs=cfg['train']['max_epochs'],
)
# dataset
train = CLIPGraspingDataset(cfg, mode='train')
valid = CLIPGraspingDataset(cfg, mode='valid')
test = CLIPGraspingDataset(cfg, mode='test')
# model
model = models.names[cfg['train']['model']](cfg, train, valid)
# resume epoch and global_steps
if last_checkpoint and cfg['train']['load_from_last_ckpt']:
print(f"Resuming: {last_checkpoint}")
last_ckpt = torch.load(last_checkpoint)
trainer.current_epoch = last_ckpt['epoch']
trainer.global_step = last_ckpt['global_step']
del last_ckpt
trainer.fit(
model,
train_dataloader=DataLoader(train, batch_size=cfg['train']['batch_size']),
val_dataloaders=DataLoader(valid, batch_size=cfg['train']['batch_size']),
)
trainer.test(
test_dataloaders=DataLoader(test, batch_size=cfg['train']['batch_size']),
ckpt_path='best'
)
if __name__ == "__main__":
main()
| 2,139 | 28.315068 | 97 | py |
snare | snare-master/models/single_cls.py | import numpy as np
import json
import os
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_lightning import LightningModule
import wandb
import models.aggregator as agg
class SingleClassifier(LightningModule):
def __init__(self, cfg, train_ds, val_ds):
self.optimizer = None
super().__init__()
self.cfg = cfg
self.train_ds = train_ds
self.val_ds = val_ds
self.dropout = self.cfg['train']['dropout']
# input dimensions
self.feats_backbone = self.cfg['train']['feats_backbone']
self.img_feat_dim = 512
self.lang_feat_dim = 512
self.num_views = 8
# choose aggregation method
agg_cfg = dict(self.cfg['train']['aggregator'])
agg_cfg['input_dim'] = self.img_feat_dim
self.aggregator_type = self.cfg['train']['aggregator']['type']
self.aggregator = agg.names[self.aggregator_type](agg_cfg)
# build network
self.build_model()
# val progress
self.best_val_acc = -1.0
self.best_val_res = None
# test progress
self.best_test_acc = -1.0
self.best_test_res = None
# results save path
self.save_path = Path(os.getcwd())
# log with wandb
self.log_data = self.cfg['train']['log']
if self.log_data:
self.run = wandb.init(
                project=self.cfg['wandb']['logger']['project'],
                config=self.cfg['train'],
settings=wandb.Settings(show_emoji=False),
reinit=True
)
            wandb.run.name = self.cfg['wandb']['logger']['run_name']
def build_model(self):
# image encoder
self.img_fc = nn.Sequential(
nn.Identity()
)
# language encoder
self.lang_fc = nn.Sequential(
nn.Identity()
)
# finetuning layers for classification
self.cls_fc = nn.Sequential(
nn.Linear(self.img_feat_dim+self.lang_feat_dim, 512),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(512, 256),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(256, 1),
)
def configure_optimizers(self):
self.optimizer = torch.optim.Adam(self.parameters(), lr=self.cfg['train']['lr'])
return self.optimizer
def smoothed_cross_entropy(self, pred, target, alpha=0.1):
# From ShapeGlot (Achlioptas et. al)
# https://github.com/optas/shapeglot/blob/master/shapeglot/models/neural_utils.py
n_class = pred.size(1)
one_hot = target
one_hot = one_hot * ((1.0 - alpha) + alpha / n_class) + (1.0 - one_hot) * alpha / n_class # smoothed
log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1)
return torch.mean(loss)
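    # Illustrative sketch of the smoothed targets built inside smoothed_cross_entropy
    # above: a one-hot row keeps (1 - alpha) + alpha / C on the true class and puts
    # alpha / C on every other class, e.g. [1, 0] -> [0.95, 0.05] for C=2, alpha=0.1.
    # Helper added for clarity only; it is not called during training.
    @staticmethod
    def _smooth_targets_sketch(one_hot, alpha=0.1):
        n_class = one_hot.size(1)
        return one_hot * ((1.0 - alpha) + alpha / n_class) + (1.0 - one_hot) * alpha / n_class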
def _criterion(self, out):
probs = out['probs']
labels = out['labels']
loss = self.smoothed_cross_entropy(probs, labels)
return {
'loss': loss
}
def forward(self, batch):
(img1_n_feats, img2_n_feats), lang_feats, ans, (key1, key2), annotation, is_visual = batch
# to device
img1_n_feats = img1_n_feats.to(device=self.device).float()
img2_n_feats = img2_n_feats.to(device=self.device).float()
lang_feats = lang_feats.to(device=self.device).float()
# aggregate
img1_feats = self.aggregator(img1_n_feats)
img2_feats = self.aggregator(img2_n_feats)
# lang encoding
lang_enc = self.lang_fc(lang_feats)
# normalize
if self.cfg['train']['normalize_feats']:
img1_feats = img1_feats / img1_feats.norm(dim=-1, keepdim=True)
img2_feats = img2_feats / img2_feats.norm(dim=-1, keepdim=True)
lang_enc = lang_enc / lang_enc.norm(dim=-1, keepdim=True)
# img1 prob
img1_enc = self.img_fc(img1_feats)
img1_prob = self.cls_fc(torch.cat([img1_enc, lang_enc], dim=-1))
# img2 prob
img2_enc = self.img_fc(img2_feats)
img2_prob = self.cls_fc(torch.cat([img2_enc, lang_enc], dim=-1))
# cat probs
probs = torch.cat([img1_prob, img2_prob], dim=-1)
# num steps taken (8 for all views)
bs = lang_enc.shape[0]
num_steps = torch.ones((bs)).to(dtype=torch.long, device=lang_enc.device)
if self.aggregator_type in ['maxpool', 'mean', 'gru']:
num_steps = num_steps * 8
elif self.aggregator_type in ['two_random_index']:
num_steps = num_steps * 2
test_mode = (ans[0] == -1)
if not test_mode:
# one-hot labels of answers
labels = F.one_hot(ans)
return {
'probs': probs,
'labels': labels,
'is_visual': is_visual,
'num_steps': num_steps,
}
else:
return {
'probs': probs,
'num_steps': num_steps,
}
def training_step(self, batch, batch_idx):
out = self.forward(batch)
# classifier loss
losses = self._criterion(out)
if self.log_data:
wandb.log({
'tr/loss': losses['loss'],
})
return dict(
loss=losses['loss']
)
def check_correct(self, b, labels, probs):
right_prob = probs[b][labels[b].argmax()]
wrong_prob = probs[b][labels[b].argmin()]
correct = right_prob > wrong_prob
return correct
def validation_step(self, batch, batch_idx):
all_view_results = {}
for view in range(self.num_views):
out = self.forward(batch)
losses = self._criterion(out)
loss = losses['loss']
probs = out['probs']
labels = out['labels']
visual = out['is_visual']
num_steps = out['num_steps']
probs = F.softmax(probs, dim=-1)
metrics = self.compute_metrics(labels, loss, probs, visual, num_steps)
all_view_results[view] = metrics
mean_val_loss = np.mean([m['val_loss'].detach().cpu().float() for m in all_view_results.values()])
mean_val_acc = np.mean([m['val_acc'] for m in all_view_results.values()])
return dict(
val_loss=mean_val_loss,
val_acc=mean_val_acc,
all_view_results=all_view_results,
)
def compute_metrics(self, labels, loss, probs, visual, num_steps):
batch_size = probs.shape[0]
val_total, val_correct, val_pl_correct = 0, 0, 0.
visual_total, visual_correct, pl_visual_correct = 0, 0, 0.
nonvis_total, nonvis_correct, pl_nonvis_correct = 0, 0, 0.
for b in range(batch_size):
correct = self.check_correct(b, labels, probs)
if correct:
val_correct += 1
val_pl_correct += 1. / num_steps[b]
val_total += 1
if bool(visual[b]):
if correct:
visual_correct += 1
pl_visual_correct += 1. / num_steps[b]
visual_total += 1
else:
if correct:
nonvis_correct += 1
pl_nonvis_correct += 1. / num_steps[b]
nonvis_total += 1
val_acc = float(val_correct) / val_total
val_pl_acc = float(val_pl_correct) / val_total
val_visual_acc = float(visual_correct) / visual_total
val_pl_visual_acc = float(pl_visual_correct) / visual_total
val_nonvis_acc = float(nonvis_correct) / nonvis_total
val_pl_nonvis_acc = float(pl_nonvis_correct) / nonvis_total
return dict(
val_loss=loss,
val_acc=val_acc,
val_pl_acc=val_pl_acc,
val_correct=val_correct,
val_pl_correct=val_pl_correct,
val_total=val_total,
val_visual_acc=val_visual_acc,
val_pl_visual_acc=val_pl_visual_acc,
val_visual_correct=visual_correct,
val_pl_visual_correct=pl_visual_correct,
val_visual_total=visual_total,
val_nonvis_acc=val_nonvis_acc,
val_pl_nonvis_acc=val_pl_nonvis_acc,
val_nonvis_correct=nonvis_correct,
val_pl_nonvis_correct=pl_nonvis_correct,
val_nonvis_total=nonvis_total,
)
def validation_epoch_end(self, all_outputs, mode='vl'):
n_view_res = {}
sanity_check = True
for view in range(self.num_views):
view_res = {
'val_loss': 0.0,
'val_correct': 0,
'val_pl_correct': 0,
'val_total': 0,
'val_visual_correct': 0,
'val_pl_visual_correct': 0,
'val_visual_total': 0,
'val_nonvis_correct': 0,
'val_pl_nonvis_correct': 0,
'val_nonvis_total': 0,
}
for output in all_outputs:
metrics = output['all_view_results'][view]
view_res['val_loss'] += metrics['val_loss'].item()
view_res['val_correct'] += metrics['val_correct']
view_res['val_pl_correct'] += int(metrics['val_pl_correct'])
view_res['val_total'] += metrics['val_total']
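                # more than 128 accumulated examples means this is a full validation epoch rather than
                # PyTorch Lightning's short sanity-check pass, whose numbers should not update the best results below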
if view_res['val_total'] > 128:
sanity_check = False
view_res['val_visual_correct'] += metrics['val_visual_correct']
view_res['val_pl_visual_correct'] += int(metrics['val_pl_visual_correct'])
view_res['val_visual_total'] += metrics['val_visual_total']
view_res['val_nonvis_correct'] += metrics['val_nonvis_correct']
view_res['val_pl_nonvis_correct'] += int(metrics['val_pl_nonvis_correct'])
view_res['val_nonvis_total'] += metrics['val_nonvis_total']
view_res['val_loss'] = float(view_res['val_loss']) / len(all_outputs)
view_res['val_acc'] = float(view_res['val_correct']) / view_res['val_total']
view_res['val_pl_acc'] = float(view_res['val_pl_correct']) / view_res['val_total']
view_res['val_visual_acc'] = float(view_res['val_visual_correct']) / view_res['val_visual_total']
view_res['val_pl_visual_acc'] = float(view_res['val_pl_visual_correct']) / view_res['val_visual_total']
view_res['val_nonvis_acc'] = float(view_res['val_nonvis_correct']) / view_res['val_nonvis_total']
view_res['val_pl_nonvis_acc'] = float(view_res['val_pl_nonvis_correct']) / view_res['val_nonvis_total']
n_view_res[view] = view_res
mean_val_loss = np.mean([r['val_loss'] for r in n_view_res.values()])
val_acc = sum([r['val_correct'] for r in n_view_res.values()]) / float(sum([r['val_total'] for r in n_view_res.values()]))
val_visual_acc = sum([r['val_visual_correct'] for r in n_view_res.values()]) / float(sum([r['val_visual_total'] for r in n_view_res.values()]))
val_nonvis_acc = sum([r['val_nonvis_correct'] for r in n_view_res.values()]) / float(sum([r['val_nonvis_total'] for r in n_view_res.values()]))
val_pl_acc = sum([r['val_pl_correct'] for r in n_view_res.values()]) / float(sum([r['val_total'] for r in n_view_res.values()]))
val_pl_visual_acc = sum([r['val_pl_visual_correct'] for r in n_view_res.values()]) / float(sum([r['val_visual_total'] for r in n_view_res.values()]))
val_pl_nonvis_acc = sum([r['val_pl_nonvis_correct'] for r in n_view_res.values()]) / float(sum([r['val_nonvis_total'] for r in n_view_res.values()]))
res = {
f'{mode}/loss': mean_val_loss,
f'{mode}/acc': val_acc,
f'{mode}/acc_visual': val_visual_acc,
f'{mode}/acc_nonvis': val_nonvis_acc,
f'{mode}/pl_acc': val_pl_acc,
f'{mode}/pl_acc_visual': val_pl_visual_acc,
f'{mode}/pl_acc_nonvis': val_pl_nonvis_acc,
f'{mode}/all_view_res': n_view_res,
}
if not sanity_check: # only check best conditions and dump data if this isn't a sanity check
# test (ran once at the end of training)
if mode == 'test':
self.best_test_res = dict(res)
# val (keep track of best results)
else:
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
self.best_val_res = dict(res)
# results to save
results_dict = self.best_test_res if mode == 'test' else self.best_val_res
best_loss = results_dict[f'{mode}/loss']
best_acc = results_dict[f'{mode}/acc']
best_acc_visual = results_dict[f'{mode}/acc_visual']
best_acc_nonvis = results_dict[f'{mode}/acc_nonvis']
best_pl_acc = results_dict[f'{mode}/pl_acc']
best_pl_acc_visual = results_dict[f'{mode}/pl_acc_visual']
best_pl_acc_nonvis = results_dict[f'{mode}/pl_acc_nonvis']
seed = self.cfg['train']['random_seed']
json_file = os.path.join(self.save_path, f'{mode}-results-{seed}.json')
# save results
with open(json_file, 'w') as f:
json.dump(results_dict, f, sort_keys=True, indent=4)
# print best result
print("\nBest-----:")
print(f'Best {mode} Acc: {best_acc:0.5f} ({best_pl_acc:0.5f}) | Visual {best_acc_visual:0.5f} ({best_pl_acc_visual:0.5f}) | Nonvis: {best_acc_nonvis:0.5f} ({best_pl_acc_nonvis:0.5f}) | Val Loss: {best_loss:0.8f} ')
print("------------")
if self.log_data:
wandb.log(res)
return dict(
val_loss=mean_val_loss,
val_acc=val_acc,
val_visual_acc=val_visual_acc,
val_nonvis_acc=val_nonvis_acc,
val_pl_acc=val_pl_acc,
val_pl_visual_acc=val_pl_visual_acc,
val_pl_nonvis_acc=val_pl_nonvis_acc,
)
def test_step(self, batch, batch_idx):
all_view_results = {}
for view in range(self.num_views):
out = self.forward(batch)
probs = out['probs']
num_steps = out['num_steps']
objects = batch[3]
annotation = batch[4]
probs = F.softmax(probs, dim=-1)
pred_ans = probs.argmax(-1)
all_view_results[view] = dict(
annotation=annotation,
objects=objects,
pred_ans=pred_ans,
num_steps=num_steps,
)
return dict(
all_view_results=all_view_results,
)
def test_epoch_end(self, all_outputs, mode='test'):
test_results = {v: list() for v in range(self.num_views)}
for out in all_outputs:
for view in range(self.num_views):
view_res = out['all_view_results']
bs = view_res[view]['pred_ans'].shape[0]
for b in range(bs):
test_results[view].append({
'annotation': view_res[view]['annotation'][b],
'objects': (
view_res[view]['objects'][0][b],
view_res[view]['objects'][1][b],
),
'pred_ans': int(view_res[view]['pred_ans'][b]),
'num_steps': int(view_res[view]['num_steps'][b]),
})
test_pred_save_path = self.save_path
if not os.path.exists(test_pred_save_path):
os.makedirs(test_pred_save_path)
model_type = self.__class__.__name__.lower()
json_file = os.path.join(test_pred_save_path, f'{model_type}_test_results.json')
with open(json_file, 'w') as f:
json.dump(test_results, f, sort_keys=True, indent=4)
| 16,049 | 36.066975 | 226 | py |
snare | snare-master/models/zero_shot_cls.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.single_cls import SingleClassifier
class ZeroShotClassifier(SingleClassifier):
def __init__(self, cfg, train_ds, val_ds):
super().__init__(cfg, train_ds, val_ds)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
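        # learnable temperature as in CLIP, initialised to ln(1/0.07) so exp(logit_scale) starts around 14.3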
def build_model(self):
pass
def configure_optimizers(self):
pass
def forward(self, batch):
(img1_n_feats, img2_n_feats), lang_feats, ans, (key1, key2), annotation, is_visual = batch
# to device
img1_n_feats = img1_n_feats.to(device=self.device).float()
img2_n_feats = img2_n_feats.to(device=self.device).float()
lang_feats = lang_feats.to(device=self.device).float()
# normalize
img1_n_feats = img1_n_feats / img1_n_feats.norm(dim=-1, keepdim=True)
img2_n_feats = img2_n_feats / img2_n_feats.norm(dim=-1, keepdim=True)
lang_feats = lang_feats / lang_feats.norm(dim=-1, keepdim=True)
# aggregate
img1_feats = self.aggregator(img1_n_feats)
img2_feats = self.aggregator(img2_n_feats)
bs = img1_feats.shape[0]
probs = []
for b in range(bs):
im = torch.stack([img1_feats[b], img2_feats[b]], dim=0)
lang = torch.stack([lang_feats[b], lang_feats[b]], dim=0)
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * im @ lang.t()
prob = logits_per_image[:,0].softmax(-1)
probs.append(prob)
# cat probs
probs = torch.stack(probs, dim=0)
# num steps taken (8 for all views)
bs = lang_feats.shape[0]
num_steps = torch.ones((bs)).to(dtype=torch.long, device=lang_feats.device)
        num_steps = num_steps * (self.num_views if self.aggregator_type in ['maxpool', 'meanpool', 'mean', 'gru'] else 1)
test_mode = (ans[0] == -1)
if not test_mode:
# one-hot labels of answers
labels = F.one_hot(ans)
return {
'probs': probs,
'labels': labels,
'is_visual': is_visual,
'num_steps': num_steps,
}
else:
return {
'probs': probs,
'num_steps': num_steps,
}
def training_step(self, batch, batch_idx):
# nothing to train
pass
def validation_step(self, batch, batch_idx):
all_view_results = {}
for view in range(8):
out = self.forward(batch)
losses = self._criterion(out)
loss = losses['loss']
probs = out['probs']
labels = out['labels']
visual = out['is_visual']
num_steps = out['num_steps']
metrics = self.compute_metrics(labels, loss, probs, visual, num_steps)
all_view_results[view] = metrics
mean_val_loss = np.mean([m['val_loss'].detach().cpu().float() for m in all_view_results.values()])
mean_val_acc = np.mean([m['val_acc'] for m in all_view_results.values()])
return dict(
val_loss=mean_val_loss,
val_acc=mean_val_acc,
all_view_results=all_view_results,
) | 3,277 | 31.78 | 109 | py |
snare | snare-master/models/aggregator.py | import torch
import torch.nn as nn
class MaxPool(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
def forward(self, x):
x, _ = x.max(dim=-2) # [B 14 512] -> [B 512]
return x
class MeanPool(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
def forward(self, x):
return x.mean(dim=-2) # [B 14 512] -> [B 512]
class RandomIndex(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
def forward(self, x):
batch_idxs = torch.randint(x.shape[1], (x.shape[0],)) # [B]
return x[torch.arange(0, x.shape[0], dtype=torch.long), batch_idxs] # [B 512]
class TwoRandomIndex(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
def forward(self, x):
batch_idxs_1 = torch.randint(x.shape[1], (x.shape[0],)) # [B]
x1 = x[torch.arange(0, x.shape[0], dtype=torch.long), batch_idxs_1] # [B 512]
batch_idxs_2 = torch.randint(x.shape[1], (x.shape[0],)) # [B]
x2 = x[torch.arange(0, x.shape[0], dtype=torch.long), batch_idxs_2] # [B 512]
x, _ = torch.stack([x1, x2], dim=-1).max(dim=-1) # [B 512]
return x
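# Name -> class registry used by the models to build their view aggregator from the config,
# e.g. (sketch, the exact config key is an assumption):
#   aggregator = names[cfg['train']['aggregator_type']](cfg)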
names = {
'meanpool': MeanPool,
'maxpool': MaxPool,
'random_index': RandomIndex,
'two_random_index': TwoRandomIndex,
} | 1,455 | 26.471698 | 86 | py |
snare | snare-master/models/rotator.py | import numpy as np
import collections
import json
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
from models.single_cls import SingleClassifier
class Rotator(SingleClassifier):
def __init__(self, cfg, train_ds, val_ds):
self.estimate_init_state = False
self.estimate_final_state = False
self.img_fc = None
self.lang_fc = None
self.cls_fc = None
self.state_fc = None
self.action_fc = None
super().__init__(cfg, train_ds, val_ds)
def build_model(self):
# image encoder
self.img_fc = nn.Sequential(
nn.Identity()
)
# language encoder
self.lang_fc = nn.Sequential(
nn.Identity()
)
# finetuning layers for classification
self.cls_fc = nn.Sequential(
nn.Linear(self.img_feat_dim+self.lang_feat_dim, 512),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(512, 256),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(256, 1),
)
        # load pre-trained classifier (weights are overridden below if a pre-trained rotator checkpoint is loaded)
model_path = self.cfg['train']['rotator']['pretrained_cls']
checkpoint = torch.load(model_path)
self.load_state_dict(checkpoint['state_dict'])
print(f"Loaded: {model_path}")
self.estimate_init_state = self.cfg['train']['rotator']['estimate_init_state']
self.estimate_final_state = self.cfg['train']['rotator']['estimate_final_state']
# state estimation layers
self.state_fc = nn.Sequential(
nn.Linear(self.img_feat_dim, 512),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(512, 256),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(256, 128),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(128, 64),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(64, 8)
)
# action layers
self.action_fc = nn.Sequential(
nn.Linear(self.img_feat_dim+self.lang_feat_dim, 512),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(512, 256),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(256, 128),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(128, 64),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(64, 8)
)
# load pre-trained rotator
if self.cfg['train']['pretrained_model']:
model_path = self.cfg['train']['pretrained_model']
self.load_state_dict(torch.load(model_path)['state_dict'])
print(f"Loaded: {model_path}")
def forward(self, batch, teacher_force=True, init_view_force=None):
(img1_n_feats, img2_n_feats), lang_feats, ans, (key1, key2), annotation, is_visual = batch
# estimate current view
init_state_estimation = self.estimate_state(img1_n_feats, img2_n_feats, lang_feats, init_view_force,
self.estimate_init_state)
# output variables from state estimation
bs = img1_n_feats.shape[0]
img1_n_feats = init_state_estimation['img1_n_feats']
img2_n_feats = init_state_estimation['img2_n_feats']
lang_feats = init_state_estimation['lang_feats']
init_views1 = init_state_estimation['init_views1']
init_views2 = init_state_estimation['init_views2']
est_init_views1 = init_state_estimation['est_init_views1']
est_init_views2 = init_state_estimation['est_init_views2']
loss = init_state_estimation['loss']
        # choose features from randomly sampled viewpoints
img1_chosen_feats, img2_chosen_feats, rotated_views1, rotated_views2 = self.choose_feats_from_random_views(
bs, img1_n_feats, img2_n_feats, init_views1, init_views2)
# estimate second view before performing prediction
final_state_estimation = self.estimate_state(img1_n_feats, img2_n_feats, lang_feats,
[rotated_views1, rotated_views2], self.estimate_final_state)
est_final_views1 = final_state_estimation['est_init_views1']
est_final_views2 = final_state_estimation['est_init_views2']
loss += final_state_estimation['loss']
        # classifier probabilities for the chosen features
img1_chosen_prob = self.cls_fc(torch.cat([img1_chosen_feats, lang_feats], dim=-1))
img2_chosen_prob = self.cls_fc(torch.cat([img2_chosen_feats, lang_feats], dim=-1))
# classifier loss
raw_probs = torch.cat([img1_chosen_prob, img2_chosen_prob], dim=-1)
probs = F.softmax(raw_probs, dim=-1)
bs = lang_feats.shape[0]
num_steps = torch.ones((bs)).to(dtype=torch.long, device=lang_feats.device) * 2
test_mode = (ans[0] == -1)
if not test_mode:
# classifier loss
cls_labels = F.one_hot(ans)
cls_loss_weight = self.cfg['train']['loss']['cls_weight']
loss += (self.smoothed_cross_entropy(raw_probs, cls_labels)) * cls_loss_weight
# put rotated views on device
rotated_views1 = rotated_views1.to(device=self.device).int()
rotated_views2 = rotated_views2.to(device=self.device).int()
# state estimation accuracy
est_init_view1_corrects = int(torch.count_nonzero(est_init_views1 == init_views1))
est_init_view2_corrects = int(torch.count_nonzero(est_init_views2 == init_views2))
total_correct_init_view_est = est_init_view1_corrects + est_init_view2_corrects
est_final_view1_corrects = int(torch.count_nonzero(est_final_views1 == rotated_views1))
est_final_view2_corrects = int(torch.count_nonzero(est_final_views2 == rotated_views2))
total_correct_final_view_est = est_final_view1_corrects + est_final_view2_corrects
# state estimation errors
est_err = torch.cat([self.modulo_views(init_views1 - est_init_views1).abs().float(),
self.modulo_views(init_views2 - est_init_views2).abs().float()])
est_err += torch.cat([self.modulo_views(rotated_views1 - est_final_views1).abs().float(),
self.modulo_views(rotated_views2 - est_final_views2).abs().float()])
est_err = est_err.mean()
return {
'probs': probs,
'action_loss': loss,
'labels': cls_labels,
'is_visual': is_visual,
'num_steps': num_steps,
'total_correct_init_view_est': total_correct_init_view_est,
'total_correct_final_view_est': total_correct_final_view_est,
'est_error': est_err,
'est_init_views1': est_init_views1,
'est_init_views2': est_init_views2,
'est_final_views1': est_final_views1,
'est_final_views2': est_final_views2,
}
else:
return {
'probs': probs,
'num_steps': num_steps,
}
def estimate_state(self, img1_n_feats, img2_n_feats, lang_feats, init_view_force, perform_estimate):
# to device
img1_n_feats = img1_n_feats.to(device=self.device).float()
img2_n_feats = img2_n_feats.to(device=self.device).float()
lang_feats = lang_feats.to(device=self.device).float()
all_probs = []
bs = img1_n_feats.shape[0]
# lang encoding
lang_feats = self.lang_fc(lang_feats)
# normalize
if self.cfg['train']['normalize_feats']:
img1_n_feats /= img1_n_feats.norm(dim=-1, keepdim=True)
img2_n_feats /= img2_n_feats.norm(dim=-1, keepdim=True)
lang_feats /= lang_feats.norm(dim=-1, keepdim=True)
# compute single_cls probs for 8 view pairs
for v in range(self.num_views):
# aggregate
img1_feats = img1_n_feats[:, v]
img2_feats = img2_n_feats[:, v]
# img1 prob
img1_feats = self.img_fc(img1_feats)
img1_prob = self.cls_fc(torch.cat([img1_feats, lang_feats], dim=-1))
# img2 prob
img2_feats = self.img_fc(img2_feats)
img2_prob = self.cls_fc(torch.cat([img2_feats, lang_feats], dim=-1))
# cat probs
view_probs = torch.cat([img1_prob, img2_prob], dim=-1)
all_probs.append(view_probs)
all_probs = torch.stack(all_probs, dim=1)
all_probs = F.softmax(all_probs, dim=2)
# best views with highest classifier probs
best_views1 = all_probs[:, :, 0].argmax(-1)
best_views2 = all_probs[:, :, 1].argmax(-1)
# worst views with lowest classifier probs
worst_views1 = all_probs[:, :, 0].argmin(-1)
        worst_views2 = all_probs[:, :, 1].argmin(-1)
# Initialize with worst views
if init_view_force == 'adv':
init_views1 = worst_views1
init_views2 = worst_views2
else:
# initialize with random views
if init_view_force is None:
init_views1 = torch.randint(self.num_views, (bs,)).cuda()
init_views2 = torch.randint(self.num_views, (bs,)).cuda()
else:
init_views1 = init_view_force[0].to(device=self.device).int()
init_views2 = init_view_force[1].to(device=self.device).int()
# init features
img1_init_feats = torch.stack([img1_n_feats[i, init_views1[i], :] for i in range(bs)])
img2_init_feats = torch.stack([img2_n_feats[i, init_views2[i], :] for i in range(bs)])
gt_init_views1 = F.one_hot(init_views1.to(torch.int64), num_classes=self.num_views)
gt_init_views2 = F.one_hot(init_views2.to(torch.int64), num_classes=self.num_views)
if perform_estimate:
# state estimator
est_init_views_logits1 = self.state_fc(img1_init_feats)
est_init_views_logits2 = self.state_fc(img2_init_feats)
# state estimation loss
est_loss_weight = self.cfg['train']['loss']['est_weight']
loss = ((self.smoothed_cross_entropy(est_init_views_logits1, gt_init_views1) +
self.smoothed_cross_entropy(est_init_views_logits2, gt_init_views2)) / 2) * est_loss_weight
est_init_views1 = F.softmax(est_init_views_logits1, dim=-1).argmax(-1)
est_init_views2 = F.softmax(est_init_views_logits2, dim=-1).argmax(-1)
else:
loss = 0
est_init_views1 = init_views1
est_init_views2 = init_views2
return {
'best_views1': best_views1,
'best_views2': best_views2,
'img1_n_feats': img1_n_feats,
'img2_n_feats': img2_n_feats,
'lang_feats': lang_feats,
'loss': loss,
'init_views1': init_views1,
'init_views2': init_views2,
'est_init_views1': est_init_views1,
'est_init_views2': est_init_views2,
}
def modulo_views(self, views):
bs = views.shape[0]
modulo_views = torch.zeros_like(views)
for b in range(bs):
view = views[b]
if view < 4 and view >= -4:
modulo_views[b] = view
elif view >= 4:
modulo_views[b] = -4 + (view % 4)
elif view < -4:
modulo_views[b] = 4 - (abs(view) % 4)
return modulo_views
def choose_feats_from_random_views(self, bs, img1_n_feats, img2_n_feats, init_views1, init_views2):
rand_next_views = torch.randint(self.num_views, (2, bs))
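        # max-pool each object's initial-view features with one randomly sampled next view,
        # i.e. the observation obtained after a single (random) rotation action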
img1_chosen_feats = torch.stack([img1_n_feats[i, [init_views1[i], rand_next_views[0, i]], :].max(dim=-2)[0]
for i in range(bs)])
img2_chosen_feats = torch.stack([img2_n_feats[i, [init_views2[i], rand_next_views[1, i]], :].max(dim=-2)[0]
for i in range(bs)])
return img1_chosen_feats, img2_chosen_feats, rand_next_views[0], rand_next_views[1]
def compute_metrics(self, labels, loss, probs, visual, num_steps,
total_correct_init_view_est, total_correct_final_view_est):
batch_size = probs.shape[0]
val_total, val_correct, val_pl_correct = 0, 0, 0.
visual_total, visual_correct, pl_visual_correct = 0, 0, 0.
nonvis_total, nonvis_correct, pl_nonvis_correct = 0, 0, 0.
for b in range(batch_size):
correct = self.check_correct(b, labels, probs)
if correct:
val_correct += 1
val_pl_correct += 1. / num_steps[b]
val_total += 1
if bool(visual[b]):
if correct:
visual_correct += 1
pl_visual_correct += 1. / num_steps[b]
visual_total += 1
else:
if correct:
nonvis_correct += 1
pl_nonvis_correct += 1. / num_steps[b]
nonvis_total += 1
correct_ests = total_correct_init_view_est + total_correct_final_view_est
total_rots = 2 * batch_size
val_acc = float(val_correct) / val_total
val_pl_acc = float(val_pl_correct) / val_total
val_visual_acc = float(visual_correct) / visual_total
val_pl_visual_acc = float(pl_visual_correct) / visual_total
val_nonvis_acc = float(nonvis_correct) / nonvis_total
val_pl_nonvis_acc = float(pl_nonvis_correct) / nonvis_total
val_est_init_err = (total_rots - float(total_correct_init_view_est)) / total_rots
val_est_final_err = (total_rots - float(total_correct_final_view_est)) / total_rots
val_est_err = (2 * total_rots - float(correct_ests)) / (2 * total_rots)
return dict(
val_loss=loss,
val_acc=val_acc,
val_pl_acc=val_pl_acc,
val_correct=val_correct,
val_pl_correct=val_pl_correct,
val_total=val_total,
val_visual_acc=val_visual_acc,
val_pl_visual_acc=val_pl_visual_acc,
val_visual_correct=visual_correct,
val_pl_visual_correct=pl_visual_correct,
val_visual_total=visual_total,
val_nonvis_acc=val_nonvis_acc,
val_pl_nonvis_acc=val_pl_nonvis_acc,
val_nonvis_correct=nonvis_correct,
val_pl_nonvis_correct=pl_nonvis_correct,
val_nonvis_total=nonvis_total,
val_est_init_err=val_est_init_err,
val_est_final_err=val_est_final_err,
val_est_err=val_est_err
)
def training_step(self, batch, batch_idx):
out = self.forward(batch, teacher_force=self.cfg['train']['rotator']['teacher_force'])
if self.log_data:
wandb.log({
'tr/loss': out['action_loss'],
})
return dict(
loss=out['action_loss']
)
def validation_step(self, batch, batch_idx):
all_view_results = {}
views = list(range(self.num_views))
for view in views:
# view selection
if self.cfg['val']['adversarial_init_view']:
out = self.forward(batch, teacher_force=False, init_view_force='adv')
else:
bs = batch[1].shape[0] # get batch size off lang feats (entry index 1 in batch)
init_view_force = [torch.ones((bs,)).int().cuda() * view,
torch.ones((bs,)).int().cuda() * view]
out = self.forward(batch, teacher_force=False, init_view_force=init_view_force)
# losses
losses = self._criterion(out)
loss = losses['loss']
probs = out['probs']
labels = out['labels']
visual = out['is_visual']
num_steps = out['num_steps']
total_correct_init_view_est = out['total_correct_init_view_est']
total_correct_final_view_est = out['total_correct_final_view_est']
metrics = self.compute_metrics(labels, loss, probs, visual, num_steps,
total_correct_init_view_est, total_correct_final_view_est)
all_view_results[view] = metrics
mean_val_loss = np.mean([m['val_loss'].detach().cpu().float() for m in all_view_results.values()])
mean_val_acc = np.mean([m['val_acc'] for m in all_view_results.values()])
return dict(
val_loss=mean_val_loss,
val_acc=mean_val_acc,
all_view_results=all_view_results,
)
def validation_epoch_end(self, all_outputs, mode='vl'):
n_view_res = {}
views = list(range(self.num_views))
sanity_check = True
for view in views:
view_res = {
'val_loss': 0.0,
'val_correct': 0,
'val_pl_correct': 0,
'val_total': 0,
'val_visual_correct': 0,
'val_pl_visual_correct': 0,
'val_visual_total': 0,
'val_nonvis_correct': 0,
'val_pl_nonvis_correct': 0,
'val_nonvis_total': 0,
'val_est_init_err': 0.0,
'val_est_final_err': 0.0,
'val_est_err': 0.0,
}
for output in all_outputs:
metrics = output['all_view_results'][view]
view_res['val_loss'] += metrics['val_loss'].item()
view_res['val_correct'] += metrics['val_correct']
view_res['val_pl_correct'] += int(metrics['val_pl_correct'])
view_res['val_total'] += metrics['val_total']
view_res['val_visual_correct'] += metrics['val_visual_correct']
view_res['val_pl_visual_correct'] += int(metrics['val_pl_visual_correct'])
view_res['val_visual_total'] += metrics['val_visual_total']
view_res['val_nonvis_correct'] += metrics['val_nonvis_correct']
view_res['val_pl_nonvis_correct'] += int(metrics['val_pl_nonvis_correct'])
view_res['val_nonvis_total'] += metrics['val_nonvis_total']
view_res['val_est_init_err'] += metrics['val_est_init_err']
view_res['val_est_final_err'] += metrics['val_est_final_err']
view_res['val_est_err'] += metrics['val_est_err']
view_res['val_loss'] = float(view_res['val_loss']) / len(all_outputs)
view_res['val_acc'] = float(view_res['val_correct']) / view_res['val_total']
view_res['val_pl_acc'] = float(view_res['val_pl_correct']) / view_res['val_total']
if view_res['val_total'] > 128:
sanity_check = False
view_res['val_visual_acc'] = float(view_res['val_visual_correct']) / view_res['val_visual_total']
view_res['val_pl_visual_acc'] = float(view_res['val_pl_visual_correct']) / view_res['val_visual_total']
view_res['val_nonvis_acc'] = float(view_res['val_nonvis_correct']) / view_res['val_nonvis_total']
view_res['val_pl_nonvis_acc'] = float(view_res['val_pl_nonvis_correct']) / view_res['val_nonvis_total']
view_res['val_est_init_err'] = float(view_res['val_est_init_err']) / len(all_outputs)
view_res['val_est_final_err'] = float(view_res['val_est_final_err']) / len(all_outputs)
view_res['val_est_err'] = float(view_res['val_est_err']) / len(all_outputs)
n_view_res[view] = view_res
mean_val_loss = np.mean([r['val_loss'] for r in n_view_res.values()])
val_acc = sum([r['val_correct'] for r in n_view_res.values()]) / float(sum([r['val_total'] for r in n_view_res.values()]))
val_visual_acc = sum([r['val_visual_correct'] for r in n_view_res.values()]) / float(sum([r['val_visual_total'] for r in n_view_res.values()]))
val_nonvis_acc = sum([r['val_nonvis_correct'] for r in n_view_res.values()]) / float(sum([r['val_nonvis_total'] for r in n_view_res.values()]))
val_pl_acc = sum([r['val_pl_correct'] for r in n_view_res.values()]) / float(sum([r['val_total'] for r in n_view_res.values()]))
val_pl_visual_acc = sum([r['val_pl_visual_correct'] for r in n_view_res.values()]) / float(sum([r['val_visual_total'] for r in n_view_res.values()]))
val_pl_nonvis_acc = sum([r['val_pl_nonvis_correct'] for r in n_view_res.values()]) / float(sum([r['val_nonvis_total'] for r in n_view_res.values()]))
val_est_err = np.mean([r['val_est_err'] for r in n_view_res.values()])
res = {
f'{mode}/loss': mean_val_loss,
f'{mode}/acc': val_acc,
f'{mode}/acc_visual': val_visual_acc,
f'{mode}/acc_nonvis': val_nonvis_acc,
f'{mode}/pl_acc': val_pl_acc,
f'{mode}/pl_acc_visual': val_pl_visual_acc,
f'{mode}/pl_acc_nonvis': val_pl_nonvis_acc,
f'{mode}/est_err': val_est_err,
f'{mode}/all_view_res': n_view_res,
}
if not sanity_check: # only check best conditions and dump data if this isn't a sanity check
if mode == 'test':
self.best_test_res = dict(res)
else:
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
self.best_val_res = dict(res)
dump_res = self.best_test_res if mode == 'test' else self.best_val_res
# print best result
print("\nBest-----:")
best_loss = dump_res[f'{mode}/loss']
best_acc = dump_res[f'{mode}/acc']
best_acc_visual = dump_res[f'{mode}/acc_visual']
best_acc_nonvis = dump_res[f'{mode}/acc_nonvis']
best_pl_acc = dump_res[f'{mode}/pl_acc']
best_pl_acc_visual = dump_res[f'{mode}/pl_acc_visual']
best_pl_acc_nonvis = dump_res[f'{mode}/pl_acc_nonvis']
best_est_err = dump_res[f'{mode}/est_err']
seed = self.cfg['train']['random_seed']
json_file = os.path.join(self.save_path, f'{mode}-results-{seed}.json')
with open(json_file, 'w') as f:
json.dump(dump_res, f, sort_keys=True, indent=4)
print(f'Curr Acc: {res[f"{mode}/acc"]:0.5f} ({res[f"{mode}/pl_acc"]:0.5f}) | Visual {res[f"{mode}/acc_visual"]:0.5f} ({res[f"{mode}/pl_acc_visual"]:0.5f}) | Nonvis: {res[f"{mode}/acc_nonvis"]:0.5f} ({res[f"{mode}/pl_acc_nonvis"]:0.5f}) | Avg. Est Err: {res[f"{mode}/est_err"]:0.5f} | Val Loss: {res[f"{mode}/loss"]:0.8f} ')
print(f'Best Acc: {best_acc:0.5f} ({best_pl_acc:0.5f}) | Visual {best_acc_visual:0.5f} ({best_pl_acc_visual:0.5f}) | Nonvis: {best_acc_nonvis:0.5f} ({best_pl_acc_nonvis:0.5f}) | Avg. Est Err: {best_est_err:0.5f} | Val Loss: {best_loss:0.8f} ')
print("------------")
if self.log_data:
wandb.log(res)
return dict(
val_loss=mean_val_loss,
val_acc=val_acc,
val_visual_acc=val_visual_acc,
val_nonvis_acc=val_nonvis_acc,
val_pl_acc=val_pl_acc,
val_pl_visual_acc=val_pl_visual_acc,
val_pl_nonvis_acc=val_pl_nonvis_acc,
)
| 23,767 | 41.980108 | 335 | py |
snare | snare-master/models/__init__.py | from models.single_cls import SingleClassifier
from models.zero_shot_cls import ZeroShotClassifier
from models.rotator import Rotator
names = {
# classifiers
'single_cls': SingleClassifier,
'zero_shot_cls': ZeroShotClassifier,
# rotators
'rotator': Rotator,
}
| 282 | 20.769231 | 51 | py |
snare | snare-master/scripts/extract_clip_features.py | import os
import torch
from PIL import Image
import numpy as np
from numpy import asarray
import clip
import pickle, gzip, json
from tqdm import tqdm
# Set filepaths
shapenet_images_path = './data/shapenet-images/screenshots'
ann_files = ["train.json", "val.json", "test.json"]
folds = './amt/folds_adversarial'
keys = os.listdir(shapenet_images_path)
# Load pre-trained CLIP
device = "cuda" if torch.cuda.is_available() else "cpu"
clip_model, preprocess = clip.load("ViT-B/32", device=device)
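# ViT-B/32 produces 512-dimensional image and text embeddings (hence the "512" in the language-feature filename below)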
# Extract CLIP visual features
data = {}
for key in tqdm(keys):
pngs = os.listdir(os.path.join(shapenet_images_path, f"{key}"))
pngs = [os.path.join(shapenet_images_path, f"{key}", p) for p in pngs if "png" in p]
pngs.sort()
for png in pngs:
im = Image.open(png)
image = preprocess(im).unsqueeze(0).to(device)
image_features = clip_model.encode_image(image).squeeze(0).detach().cpu().numpy()
image_features = image_features.tolist()
name = png.split('/')[-1].replace(".png", "")
data[name] = image_features
save_path = './data/shapenet-clipViT32-frames.json.gz'
json.dump(data, gzip.open(save_path,'wt'))
# Extract CLIP language features
anns = []
for file in ann_files:
fname_rel = os.path.join(folds, file)
print(fname_rel)
with open(fname_rel, 'r') as f:
anns = anns + json.load(f)
lang_feat = {}
for d in tqdm(anns):
ann = d['annotation']
text = clip.tokenize([ann]).to(device)
feat = clip_model.encode_text(text)
feat = feat.squeeze(0).detach().cpu().numpy()
feat = feat.tolist()
lang_feat[ann] = feat
save_path = './data/langfeat-512-clipViT32.json.gz'
json.dump(lang_feat, gzip.open(save_path,'wt')) | 1,724 | 25.953125 | 89 | py |
snare | snare-master/scripts/aggregate_results.py |
import argparse
import json
import os
import numpy as np
import pandas as pd
from scipy.stats import ttest_ind
from tqdm import tqdm
clip_model_types = ['clip-single_cls-maxpool',
'clip-single_cls-meanpool',
'clip-single_cls-random_index',
'clip-single_cls-two_random_index',
'clip-zero_shot_cls-maxpool',
'clip-zero_shot_cls-meanpool',
'clip-zero_shot_cls-random_index',
'clip-zero_shot_cls-two_random_index']
rotator_model_types = ['clip-rotator-two_random_index']
THRESH = 0.05
welchs_opts = {'equal_var': False,
'alternative': 'two-sided'}
def main(args):
# Assemble validation and test results.
d = []
erroneous_result_files = []
missing_result_files = []
for model_types, prefix_dir, aux in \
[[clip_model_types, "%s_seed_" % args.clip_results_dir_prefix, 'none'],
[rotator_model_types, "%s_init_seed_" % args.rotator_results_dir_prefix, 'init'],
[rotator_model_types, "%s_final_seed_" % args.rotator_results_dir_prefix, 'final'],
[rotator_model_types, "%s_init_final_seed_" % args.rotator_results_dir_prefix, 'both']]:
for model in model_types:
n_seeds = 1 if 'zero_shot' in model else args.n_seeds # zero-shot models have no inference-time variance
for seed in range(n_seeds):
# Read in validation results.
fn = os.path.join("%s%d" % (prefix_dir, seed), model, 'vl-results-%d.json' % seed)
if not os.path.isfile(fn):
missing_result_files.append(fn)
continue
with open(fn, 'r') as f:
seed_results = json.load(f)
# Check that the result isn't from a pytorch lightning sanity check.
if seed_results['vl/all_view_res']['0']['val_total'] < 2000:
erroneous_result_files.append(fn)
entry = {'model': model,
'aux': aux,
'seed': seed,
'fold': 'val',
'acc': seed_results['vl/acc'],
'acc_nonvis': seed_results['vl/acc_nonvis'],
'acc_visual': seed_results['vl/acc_visual']}
d.append(entry)
# Compute test results.
if args.test_set_answers_fn:
fn = os.path.join("%s%d" % (prefix_dir, seed), model, 'test-results-%d.json' % seed)
if not os.path.isfile(fn) or args.force_calculate_test_results: # calculate test results
model_family = 'zeroshotclassifier'
if 'single_cls' in model:
model_family = 'singleclassifier'
if 'rotator' in model:
model_family = 'rotator'
results_fn = os.path.join("%s%d" % (prefix_dir, seed), model, '%s_test_results.json' % model_family)
if not os.path.isfile(results_fn):
missing_result_files.append(results_fn)
continue
seed_results = compute_test_metrics(args.test_set_answers_fn, results_fn)
# Write test results so we don't have to do this again.
with open(fn, 'w') as f:
json.dump(seed_results, f)
else:
with open(fn, 'r') as f:
seed_results = json.load(f)
entry = {'model': model,
'aux': aux,
'seed': seed,
'fold': 'test',
'acc': seed_results['test/acc'],
'acc_nonvis': seed_results['test/acc_nonvis'],
'acc_visual': seed_results['test/acc_visual']}
d.append(entry)
# Data statistics and tests.
df = pd.DataFrame(d)
comparisons = [(('clip-single_cls-maxpool', 'none'), ('clip-single_cls-meanpool', 'none')),
(('clip-rotator-two_random_index', 'both'), ('clip-single_cls-maxpool', 'none')),
(('clip-rotator-two_random_index', 'both'), ('clip-single_cls-two_random_index', 'none')),
]
comp_folds = ['val', 'test']
comp_metrics = ['acc'] # , 'acc_nonvis', 'acc_visual']
for fold in comp_folds:
print('fold=%s' % fold)
for (model_a, aux_a), (model_b, aux_b) in comparisons:
print("(%s, %s) compared to (%s, %s)" % (model_a, aux_a, model_b, aux_b))
for metric in comp_metrics:
a = df.loc[(df['model'] == model_a) & (df['aux'] == aux_a) & (df['fold'] == fold)][metric]
b = df.loc[(df['model'] == model_b) & (df['aux'] == aux_b) & (df['fold'] == fold)][metric]
print('\t%s\t\t\t\t\t\tmean\tstd\tN' % metric)
print('\t\t%s\t%.3f\t%.3f\t%d' % (model_a, np.mean(a), np.std(a), len(a)))
print('\t\t%s\t%.3f\t%.3f\t%d' % (model_b, np.mean(b), np.std(b), len(b)))
t, p = ttest_ind(a, b, **welchs_opts)
print('\t\t\tp=%f; sig=%d' %
(p, 1 if p < THRESH / ((len(comp_folds) * len(comparisons) * len(comp_metrics)) - 1) else 0))
# Subtract one from Bonferroni correction because we don't actually want to run/care about
# the maxpool/meanpool comparison on the test fold.
# Populate LaTeX table
# [model] & [views] & [v viz] & [v nonviz] & [v all] & [t viz] & [t nonviz] & [t all]
# CLIP & 360 & 84.5 & 66.1 & 75.3 & 80.0 & 61.4 & 70.9 \\
# \scorer & 360 & \bf 90.6 & \bf 79.3 & \bf 85.0 & \bf 85.9 & 71.3 & \bf 78.7 \\
# \midrule
# CLIP & Single & 79.5 & 65.2 & 72.3 & 73.9 & 60.4 & 67.3 \\
# \scorer\ & Single & \bf 89.4 & \bf 75.6 & \bf 82.5 & \bf 84.1 & \bf 69.6 & \bf 77.0 \\
# \midrule
# CLIP & Two & 81.7 & 65.5 & 73.6 & 76.2 & 61.0 & 68.8 \\
# \scorer\ & Two & 91.2 & 75.1 & 83.2 & 85.8 & 70.9 & 78.5 \\
# \model\ & Two & \B{91.5} & \B{81.2} & \B{86.3} & \B{86.6} & \B{72.0} & \B{79.4} \\
for comp_set in \
[[['clip-zero_shot_cls-maxpool', 'CLIP', '360-max', 'none'],
['clip-zero_shot_cls-meanpool', 'CLIP', '360-mean', 'none'],
['clip-single_cls-maxpool', '\\scorer', '360-max', 'none'],
['clip-single_cls-meanpool', '\\scorer', '360-mean', 'none']],
[['clip-zero_shot_cls-random_index', 'CLIP', 'Single', 'none'],
['clip-single_cls-random_index', '\\scorer', 'Single', 'none']],
[['clip-zero_shot_cls-two_random_index', 'CLIP', 'Two', 'none'],
['clip-single_cls-two_random_index', '\\scorer', 'Two', 'none'],
['clip-rotator-two_random_index', '\\model-init', 'Two', 'init'],
['clip-rotator-two_random_index', '\\model-final', 'Two', 'final'],
['clip-rotator-two_random_index', '\\model-both', 'Two', 'both']],
]:
for model, model_str, views, aux in comp_set:
ss = ['%s & %s' % (model_str, views)]
for fold in ['val', 'test']:
for metric in ['acc_visual', 'acc_nonvis', 'acc']:
a = df.loc[(df['model'] == model) & (df['fold'] == fold) & (df['aux'] == aux)][metric]
ss.append('%.1f (%.1f)' % (np.mean(a) * 100., np.std(a) * 100.))
print(' & '.join(ss) + ' \\\\')
print('\\midrule')
if len(missing_result_files) > 0:
print('WARNING: The following results files are expected but were not found; results may shift')
print('\n'.join(missing_result_files))
if len(erroneous_result_files) > 0:
print('WARNING: The following results files are likely bad perf estimates from PTL sanity checks')
print('\n'.join(erroneous_result_files))
# answers_fn - filepath to answers_json
# output_fn - filepath to output dump, e.g., zeroshotclassifier_test_results.json
def compute_test_metrics(answers_fn, output_fn):
# load JSONs
with open(answers_fn, 'r') as f:
answers = json.load(f)
with open(output_fn, 'r') as f:
output = json.load(f)
num_views = 8
n_view_res = {}
mode = 'test'
for view in range(num_views):
print(f"processing view: {view}")
view_res = {
'correct': 0,
'pl_correct': 0,
'total': 0,
'visual_correct': 0,
'pl_visual_correct': 0,
'visual_total': 0,
'nonvis_correct': 0,
'pl_nonvis_correct': 0,
'nonvis_total': 0,
}
for idx, o in enumerate(tqdm(output[str(view)])):
# pdb.set_trace()
assert (o['objects'] == answers[idx]['objects']), \
'Prediction instance does not match answers ' + str(o['objects']) + ' ' + str(answers[idx]['objects'])
pred_ans = o['pred_ans']
corr_ans = answers[idx]['ans']
correct = (pred_ans == corr_ans)
num_steps = o['num_steps']
is_visual = answers[idx]['visual']
if correct:
view_res['correct'] += 1
view_res['pl_correct'] += 1. / num_steps
view_res['total'] += 1
if is_visual:
if correct:
view_res['visual_correct'] += 1
view_res['pl_visual_correct'] += 1. / float(num_steps)
view_res['visual_total'] += 1
else:
if correct:
view_res['nonvis_correct'] += 1
view_res['pl_nonvis_correct'] += 1. / float(num_steps)
view_res['nonvis_total'] += 1
view_res['acc'] = float(view_res['correct']) / view_res['total']
view_res['pl_acc'] = float(view_res['pl_correct']) / view_res['total']
view_res['visual_acc'] = float(view_res['visual_correct']) / view_res['visual_total']
view_res['pl_visual_acc'] = float(view_res['pl_visual_correct']) / view_res['visual_total']
view_res['nonvis_acc'] = float(view_res['nonvis_correct']) / view_res['nonvis_total']
view_res['pl_nonvis_acc'] = float(view_res['pl_nonvis_correct']) / view_res['nonvis_total']
n_view_res[view] = view_res
acc = sum([r['correct'] for r in n_view_res.values()]) / float(sum([r['total'] for r in n_view_res.values()]))
visual_acc = sum([r['visual_correct'] for r in n_view_res.values()]) / float(
sum([r['visual_total'] for r in n_view_res.values()]))
nonvis_acc = sum([r['nonvis_correct'] for r in n_view_res.values()]) / float(
sum([r['nonvis_total'] for r in n_view_res.values()]))
pl_acc = sum([r['pl_correct'] for r in n_view_res.values()]) / float(sum([r['total'] for r in n_view_res.values()]))
pl_visual_acc = sum([r['pl_visual_correct'] for r in n_view_res.values()]) / float(
sum([r['visual_total'] for r in n_view_res.values()]))
pl_nonvis_acc = sum([r['pl_nonvis_correct'] for r in n_view_res.values()]) / float(
sum([r['nonvis_total'] for r in n_view_res.values()]))
res = {
f'{mode}/acc': acc,
f'{mode}/acc_visual': visual_acc,
f'{mode}/acc_nonvis': nonvis_acc,
f'{mode}/pl_acc': pl_acc,
f'{mode}/pl_acc_visual': pl_visual_acc,
f'{mode}/pl_acc_nonvis': pl_nonvis_acc,
f'{mode}/all_view_res': n_view_res,
}
# results to save
results_dict = dict(res)
best_acc = results_dict[f'{mode}/acc']
best_acc_visual = results_dict[f'{mode}/acc_visual']
best_acc_nonvis = results_dict[f'{mode}/acc_nonvis']
best_pl_acc = results_dict[f'{mode}/pl_acc']
best_pl_acc_visual = results_dict[f'{mode}/pl_acc_visual']
best_pl_acc_nonvis = results_dict[f'{mode}/pl_acc_nonvis']
# print best result
print("\nBest-----:")
print(
f'Best {mode} Acc: {best_acc:0.5f} ({best_pl_acc:0.5f}) | Visual {best_acc_visual:0.5f} ({best_pl_acc_visual:0.5f}) | Nonvis: {best_acc_nonvis:0.5f} ({best_pl_acc_nonvis:0.5f}) ')
print("------------")
return results_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--clip_results_dir_prefix', type=str, required=True,
help='CLIP and MATCH results dir prefix before adding seed')
parser.add_argument('--rotator_results_dir_prefix', type=str, required=True,
help='Rotator results dir prefix before adding seed and losses')
parser.add_argument('--n_seeds', type=int, required=True,
help='The number of seeds to index')
parser.add_argument('--test_set_answers_fn', type=str, required=False,
help='The test set annotations for final test eval; not publicly available')
parser.add_argument('--force_calculate_test_results', action='store_true')
args = parser.parse_args()
main(args)
| 13,282 | 46.270463 | 187 | py |
snare | snare-master/data/dataset.py | import os
import json
import torch
import torch.utils.data
import numpy as np
import gzip
import json
class CLIPGraspingDataset(torch.utils.data.Dataset):
def __init__(self, cfg, mode='train'):
self.total_views = 14
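        # 14 rendered views are stored per object; __getitem__ drops the first 6 (top/bottom) and keeps the remaining 8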
self.cfg = cfg
self.mode = mode
self.folds = os.path.join(self.cfg['data']['amt_data'], self.cfg['data']['folds'])
self.feats_backbone = self.cfg['train']['feats_backbone']
self.load_entries()
self.load_extracted_features()
def load_entries(self):
train_train_files = ["train.json"]
train_val_files = ["val.json"]
test_test_files = ["test.json"]
# modes
if self.mode == "train":
self.files = train_train_files
elif self.mode == 'valid':
self.files = train_val_files
elif self.mode == "test":
self.files = test_test_files
else:
raise RuntimeError('mode not recognized, should be train, valid or test: ' + str(self.mode))
# load amt data
self.data = []
for file in self.files:
fname_rel = os.path.join(self.folds, file)
print(fname_rel)
with open(fname_rel, 'r') as f:
self.data = self.data + json.load(f)
print(f"Loaded Entries. {self.mode}: {len(self.data)} entries")
def load_extracted_features(self):
if self.feats_backbone == "clip":
lang_feats_path = self.cfg['data']['clip_lang_feats']
with gzip.open(lang_feats_path, 'r') as f:
self.lang_feats = json.load(f)
img_feats_path = self.cfg['data']['clip_img_feats']
with gzip.open(img_feats_path, 'r') as f:
self.img_feats = json.load(f)
else:
raise NotImplementedError()
def __len__(self):
return len(self.data)
def get_img_feats(self, key):
feats = []
for i in range(self.total_views):
feat = np.array(self.img_feats[f'{key}-{i}'])
feats.append(feat)
return np.array(feats)
def __getitem__(self, idx):
entry = self.data[idx]
# get keys
entry_idx = entry['ans'] if 'ans' in entry else -1 # test set does not contain answers
if len(entry['objects']) == 2:
key1, key2 = entry['objects']
# fix missing key in pair
else:
key1 = entry['objects'][entry_idx]
while True:
key2 = np.random.choice(list(self.img_feats.keys())).split("-")[0]
if key2 != key1:
break
# annotation
annotation = entry['annotation']
is_visual = entry['visual'] if 'ans' in entry else -1 # test set does not have labels for visual and non-visual categories
# feats
start_idx = 6 # discard first 6 views that are top and bottom viewpoints
img1_n_feats = torch.from_numpy(self.get_img_feats(key1))[start_idx:]
img2_n_feats = torch.from_numpy(self.get_img_feats(key2))[start_idx:]
lang_feats = torch.from_numpy(np.array(self.lang_feats[annotation]))
# label
ans = entry_idx
return (
(img1_n_feats, img2_n_feats),
lang_feats,
ans,
(key1, key2),
annotation,
is_visual,
) | 3,365 | 30.754717 | 130 | py |
snare | snare-master/data/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/core/Lagrange.py | import numpy as np
from scipy.special import roots_legendre
def computeFejerRule(n):
"""
Compute a Fejer rule of the first kind, using DFT (Waldvogel 2006)
    Inspired by quadpy (https://github.com/nschloe/quadpy @Nico_Schlömer)
Parameters
----------
n : int
Number of points for the quadrature rule.
Returns
-------
nodes : np.1darray(n)
The nodes of the quadrature rule
weights : np.1darray(n)
The weights of the quadrature rule.
"""
# Initialize output variables
n = int(n)
nodes = np.empty(n, dtype=float)
weights = np.empty(n, dtype=float)
# Compute nodes
theta = np.arange(1, n + 1, dtype=float)[-1::-1]
theta *= 2
theta -= 1
theta *= np.pi / (2 * n)
np.cos(theta, out=nodes)
# Compute weights
# -- Initial variables
N = np.arange(1, n, 2)
lN = len(N)
m = n - lN
K = np.arange(m)
# -- Build v0
v0 = np.concatenate([2 * np.exp(1j * np.pi * K / n) / (1 - 4 * K**2), np.zeros(lN + 1)])
# -- Build v1 from v0
v1 = np.empty(len(v0) - 1, dtype=complex)
np.conjugate(v0[:0:-1], out=v1)
v1 += v0[:-1]
# -- Compute inverse Fourier transform
w = np.fft.ifft(v1)
if max(w.imag) > 1.0e-15:
        raise ValueError(f'Maximum imaginary part too large for ifft: {max(w.imag)}')
# -- Store weights
weights[:] = w.real
return nodes, weights
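# Note: the Fejer-I weights are positive and sum to 2, the length of [-1, 1], so constants are integrated exactly.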
class LagrangeApproximation(object):
r"""
Class approximating any function on a given set of points using barycentric
Lagrange interpolation.
Let note :math:`(t_j)_{0\leq j<n}` the set of points, then any scalar
function :math:`f` can be approximated by the barycentric formula :
.. math::
p(x) =
\frac{\displaystyle \sum_{j=0}^{n-1}\frac{w_j}{x-x_j}f_j}
{\displaystyle \sum_{j=0}^{n-1}\frac{w_j}{x-x_j}},
where :math:`f_j=f(t_j)` and
.. math::
w_j = \frac{1}{\prod_{k\neq j}(x_j-x_k)}
are the barycentric weights.
    The theory and implementation are inspired by `this paper <http://dx.doi.org/10.1137/S0036144502417715>`_.
Parameters
----------
points : list, tuple or np.1darray
The given interpolation points, no specific scaling, but must be
ordered in increasing order.
Attributes
----------
points : np.1darray
The interpolating points
weights : np.1darray
The associated barycentric weights
"""
def __init__(self, points):
points = np.asarray(points).ravel()
diffs = points[:, None] - points[None, :]
diffs[np.diag_indices_from(diffs)] = 1
def analytic(diffs):
# Fast implementation (unstable for large number of points)
invProd = np.prod(diffs, axis=1)
invProd **= -1
return invProd
with np.errstate(divide='raise', over='ignore'):
try:
weights = analytic(diffs)
except FloatingPointError:
raise ValueError('Lagrange formula unstable for that much nodes')
# Store attributes
self.points = points
self.weights = weights
@property
def n(self):
return self.points.size
def getInterpolationMatrix(self, times):
r"""
Compute the interpolation matrix for a given set of discrete "time"
points.
For instance, if we note :math:`\vec{f}` the vector containing the
:math:`f_j=f(t_j)` values, and :math:`(\tau_m)_{0\leq m<M}`
the "time" points where to interpolate.
Then :math:`I[\vec{f}]`, the vector containing the interpolated
:math:`f(\tau_m)` values, can be obtained using :
.. math::
I[\vec{f}] = P_{Inter} \vec{f},
where :math:`P_{Inter}` is the interpolation matrix returned by this
method.
Parameters
----------
times : list-like or np.1darray
The discrete "time" points where to interpolate the function.
Returns
-------
PInter : np.2darray(M, n)
The interpolation matrix, with :math:`M` rows (size of the **times**
parameter) and :math:`n` columns.
"""
# Compute difference between times and Lagrange points
times = np.asarray(times)
with np.errstate(divide='ignore'):
iDiff = 1 / (times[:, None] - self.points[None, :])
# Find evaluated positions that coincide with one Lagrange point
concom = (iDiff == np.inf) | (iDiff == -np.inf)
i, j = np.where(concom)
        # On those rows, replace iDiff by the 0/1 indicator of the coinciding node so its value is simply copied
iDiff[i, :] = concom[i, :]
# Compute interpolation matrix using weights
PInter = iDiff * self.weights
PInter /= PInter.sum(axis=-1)[:, None]
return PInter
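    # Each row of the interpolation matrix sums to 1 (barycentric normalisation), so constants are
    # reproduced exactly, and an evaluation point that equals a node yields a unit row (exact copy).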
def getIntegrationMatrix(self, intervals, numQuad='LEGENDRE_NUMPY'):
r"""
Compute the integration matrix for a given set of intervals.
For instance, if we note :math:`\vec{f}` the vector containing the
:math:`f_j=f(t_j)` values, and
:math:`(\tau_{m,left}, \tau_{m,right})_{0\leq m<M}` the different
interval where the function should be integrated using the barycentric
interpolant polynomial.
Then :math:`\Delta[\vec{f}]`, the vector containing the approximations
of
.. math::
\int_{\tau_{m,left}}^{\tau_{m,right}} f(t)dt,
can be obtained using :
.. math::
\Delta[\vec{f}] = P_{Integ} \vec{f},
where :math:`P_{Integ}` is the interpolation matrix returned by this
method.
Parameters
----------
intervals : list of pairs
A list of all integration intervals.
numQuad : str, optional
Quadrature rule used to integrate the interpolant barycentric
polynomial. Can be :
- 'LEGENDRE_NUMPY' : Gauss-Legendre rule from Numpy
- 'LEGENDRE_SCIPY' : Gauss-Legendre rule from Scipy
- 'FEJER' : internaly implemented Fejer-I rule
The default is 'LEGENDRE_NUMPY'.
Returns
-------
PInter : np.2darray(M, n)
The integration matrix, with :math:`M` rows (number of intervals)
and :math:`n` columns.
"""
if numQuad == 'LEGENDRE_NUMPY':
# Legendre gauss rule, integrate exactly polynomials of deg. (2n-1)
iNodes, iWeights = np.polynomial.legendre.leggauss((self.n + 1) // 2)
elif numQuad == 'LEGENDRE_SCIPY':
# Using Legendre scipy implementation
iNodes, iWeights = roots_legendre((self.n + 1) // 2)
elif numQuad == 'FEJER':
# Fejer-I rule, integrate exactly polynomial of deg. n-1
iNodes, iWeights = computeFejerRule(self.n - ((self.n + 1) % 2))
else:
raise NotImplementedError(f'numQuad={numQuad}')
# Compute quadrature nodes for each interval
intervals = np.array(intervals)
aj, bj = intervals[:, 0][:, None], intervals[:, 1][:, None]
tau, omega = iNodes[None, :], iWeights[None, :]
tEval = (bj - aj) / 2 * tau + (bj + aj) / 2
# Compute the integrand function on nodes
integrand = self.getInterpolationMatrix(tEval.ravel()).T.reshape((-1,) + tEval.shape)
# Apply quadrature rule to integrate
integrand *= omega
integrand *= (bj - aj) / 2
PInter = integrand.sum(axis=-1).T
return PInter
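if __name__ == '__main__':
    # Small hedged usage sketch (illustration only, not part of the library API):
    # interpolate and integrate exp(x) from its values on 5 equidistant points in [0, 1].
    pts = np.linspace(0, 1, 5)
    approx = LagrangeApproximation(pts)
    fValues = np.exp(pts)
    # interpolate at two points that are not nodes
    PInter = approx.getInterpolationMatrix([0.1, 0.9])
    print(PInter @ fValues)   # close to [exp(0.1), exp(0.9)]
    # integrate the interpolant over the full interval
    PInteg = approx.getIntegrationMatrix([(0, 1)])
    print(PInteg @ fValues)   # close to [exp(1) - 1]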
| 7,567 | 31.34188 | 111 | py |
pySDC | pySDC-master/pySDC/core/Nodes.py | import numpy as np
from scipy.linalg import eigh_tridiagonal
NODE_TYPES = ['EQUID', 'LEGENDRE', 'CHEBY-1', 'CHEBY-2', 'CHEBY-3', 'CHEBY-4']
QUAD_TYPES = ['GAUSS', 'RADAU-LEFT', 'RADAU-RIGHT', 'LOBATTO']
class NodesError(Exception):
"""Exception class to handle error in NodesGenerator class"""
pass
class NodesGenerator(object):
"""
Class that can be used to generate generic distribution of nodes derived
from Gauss quadrature rule.
    Its implementation is fully inspired by a `book of W. Gautschi <https://doi.org/10.1093/oso/9780198506720.001.0001>`_.
Attributes
----------
node_type : str
The type of node distribution
quad_type : str
The quadrature type
"""
def __init__(self, node_type='LEGENDRE', quad_type='LOBATTO'):
"""
Parameters
----------
node_type : str, optional
The type of node distribution, can be
- EQUID : equidistant nodes
- LEGENDRE : node distribution from Legendre polynomials
- CHEBY-1 : node distribution from Chebychev polynomials (1st kind)
- CHEBY-2 : node distribution from Chebychev polynomials (2nd kind)
- CHEBY-3 : node distribution from Chebychev polynomials (3rd kind)
- CHEBY-4 : node distribution from Chebychev polynomials (4th kind)
The default is 'LEGENDRE'.
quad_type : str, optional
The quadrature type, can be
- GAUSS : inner point only, no node at boundary
- RADAU-LEFT : only left boundary as node
- RADAU-RIGHT : only right boundary as node
- LOBATTO : left and right boundary as node
The default is 'LOBATTO'.
"""
# Check argument validity
for arg, vals in zip(['node_type', 'quad_type'], [NODE_TYPES, QUAD_TYPES]):
val = eval(arg)
if val not in vals:
raise NodesError(f"{arg}='{val}' not implemented, must be in {vals}")
# Store attributes
self.node_type = node_type
self.quad_type = quad_type
def getNodes(self, num_nodes):
"""
Computes a given number of quadrature nodes.
Parameters
----------
num_nodes : int
Number of nodes to compute.
Returns
-------
nodes : np.1darray
Nodes located in [-1, 1], in increasing order.
"""
# Check number of nodes
if self.quad_type in ['LOBATTO', 'RADAU-LEFT'] and num_nodes < 2:
raise NodesError(f"num_nodes must be larger than 2 for {self.quad_type}, " f"but for {num_nodes}")
elif num_nodes < 1:
raise NodesError("you surely want at least one node ;)")
# Equidistant nodes
if self.node_type == 'EQUID':
if self.quad_type == 'GAUSS':
return np.linspace(-1, 1, num=num_nodes + 2)[1:-1]
elif self.quad_type == 'LOBATTO':
return np.linspace(-1, 1, num=num_nodes)
elif self.quad_type == 'RADAU-RIGHT':
return np.linspace(-1, 1, num=num_nodes + 1)[1:]
elif self.quad_type == 'RADAU-LEFT':
return np.linspace(-1, 1, num=num_nodes + 1)[:-1]
# Quadrature nodes linked to orthogonal polynomials
alpha, beta = self.getTridiagCoefficients(num_nodes)
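        # Golub-Welsch: the nodes are the eigenvalues of the symmetric tridiagonal Jacobi matrix
        # with alpha on the diagonal and sqrt(beta[1:]) on the off-diagonals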
nodes = eigh_tridiagonal(alpha, np.sqrt(beta[1:]))[0]
nodes.sort()
return nodes
def getOrthogPolyCoefficients(self, num_coeff):
"""
Produces a given number of analytic three-term recurrence coefficients.
Parameters
----------
num_coeff : int
Number of coefficients to compute.
Returns
-------
alpha : np.1darray
The alpha coefficients of the three-term recurrence.
beta : np.1darray
The beta coefficients of the three-term recurrence.
"""
if self.node_type == 'LEGENDRE':
k = np.arange(num_coeff, dtype=float)
alpha = 0 * k
beta = k**2 / (4 * k**2 - 1)
beta[0] = 2
elif self.node_type == 'CHEBY-1':
alpha = np.zeros(num_coeff)
beta = np.full(num_coeff, 0.25)
beta[0] = np.pi
if num_coeff > 1:
beta[1] = 0.5
elif self.node_type == 'CHEBY-2':
alpha = np.zeros(num_coeff)
beta = np.full(num_coeff, 0.25)
beta[0] = np.pi / 2
elif self.node_type == 'CHEBY-3':
alpha = np.zeros(num_coeff)
alpha[0] = 0.5
beta = np.full(num_coeff, 0.25)
beta[0] = np.pi
elif self.node_type == 'CHEBY-4':
alpha = np.zeros(num_coeff)
alpha[0] = -0.5
beta = np.full(num_coeff, 0.25)
beta[0] = np.pi
return alpha, beta
def evalOrthogPoly(self, t, alpha, beta):
"""
Evaluates the two higher order orthogonal polynomials corresponding
to the given (alpha,beta) coefficients.
Parameters
----------
t : float or np.1darray
The point where to evaluate the orthogonal polynomials.
alpha : np.1darray
The alpha coefficients of the three-term recurrence.
beta : np.1darray
The beta coefficients of the three-term recurrence.
Returns
-------
        pi[0] : float or np.1darray
            The second-highest order orthogonal polynomial evaluation.
        pi[1] : float or np.1darray
            The highest order orthogonal polynomial evaluation.
"""
t = np.asarray(t, dtype=float)
pi = np.array([np.zeros_like(t) for i in range(3)])
pi[1:] += 1
for alpha_j, beta_j in zip(alpha, beta):
pi[2] *= t - alpha_j
pi[0] *= beta_j
pi[2] -= pi[0]
pi[0] = pi[1]
pi[1] = pi[2]
return pi[0], pi[1]
def getTridiagCoefficients(self, num_nodes):
"""
Computes recurrence coefficients for the tridiagonal Jacobian matrix,
taking into account the quadrature type.
Parameters
----------
num_nodes : int
Number of nodes that should be computed from those coefficients.
Returns
-------
alpha : np.1darray
The modified alpha coefficients of the three-term recurrence.
beta : np.1darray
The modified beta coefficients of the three-term recurrence.
"""
# Coefficients for Gauss quadrature type
alpha, beta = self.getOrthogPolyCoefficients(num_nodes)
# If not Gauss quadrature type, modify the alpha/beta coefficients
if self.quad_type.startswith('RADAU'):
b = -1.0 if self.quad_type.endswith('LEFT') else 1.0
b1, b2 = self.evalOrthogPoly(b, alpha[:-1], beta[:-1])[:2]
alpha[-1] = b - beta[-1] * b1 / b2
elif self.quad_type == 'LOBATTO':
a, b = -1.0, 1.0
a2, a1 = self.evalOrthogPoly(a, alpha[:-1], beta[:-1])[:2]
b2, b1 = self.evalOrthogPoly(b, alpha[:-1], beta[:-1])[:2]
alpha[-1], beta[-1] = np.linalg.solve([[a1, a2], [b1, b2]], [a * a1, b * b1])
return alpha, beta
| 7,344 | 33.646226 | 124 | py |
pySDC | pySDC-master/pySDC/core/Sweeper.py | import logging
import numpy as np
import scipy.linalg
import scipy.optimize as opt
from pySDC.core.Errors import ParameterError
from pySDC.core.Level import level
from pySDC.core.Collocation import CollBase
from pySDC.helpers.pysdc_helper import FrozenClass
# short helper class to add params as attributes
class _Pars(FrozenClass):
def __init__(self, pars):
self.do_coll_update = False
self.initial_guess = 'spread'
self.skip_residual_computation = () # gain performance at the cost of correct residual output
for k, v in pars.items():
if k != 'collocation_class':
setattr(self, k, v)
self._freeze()
class sweeper(object):
"""
Base abstract sweeper class
Attributes:
logger: custom logger for sweeper-related logging
params (__Pars): parameter object containing the custom parameters passed by the user
coll (pySDC.Collocation.CollBase): collocation object
"""
def __init__(self, params):
"""
Initialization routine for the base sweeper
Args:
params (dict): parameter object
"""
# set up logger
self.logger = logging.getLogger('sweeper')
essential_keys = ['num_nodes']
for key in essential_keys:
if key not in params:
msg = 'need %s to instantiate step, only got %s' % (key, str(params.keys()))
self.logger.error(msg)
raise ParameterError(msg)
if 'collocation_class' not in params:
params['collocation_class'] = CollBase
# prepare random generator for initial guess
if params.get('initial_guess', 'spread') == 'random':
params['random_seed'] = params.get('random_seed', 1984)
self.rng = np.random.RandomState(params['random_seed'])
self.params = _Pars(params)
coll = params['collocation_class'](**params)
if not coll.right_is_node and not self.params.do_coll_update:
self.logger.warning(
'we need to do a collocation update here, since the right end point is not a node. Changing this!'
)
self.params.do_coll_update = True
# This will be set as soon as the sweeper is instantiated at the level
self.__level = None
# collocation object
self.coll = coll
self.parallelizable = False
    def get_Qdelta_implicit(self, coll, qd_type):
        """Compute a lower-triangular approximation (preconditioner) QDelta of the collocation matrix Q."""
        def rho(x):
            # spectral radius used as objective by the 'MIN' preconditioner below (m is taken from the enclosing scope)
            return max(abs(np.linalg.eigvals(np.eye(m) - np.diag([x[i] for i in range(m)]).dot(coll.Qmat[1:, 1:]))))
        QDmat = np.zeros(coll.Qmat.shape)
if qd_type == 'LU':
QT = coll.Qmat[1:, 1:].T
[_, _, U] = scipy.linalg.lu(QT, overwrite_a=True)
QDmat[1:, 1:] = U.T
elif qd_type == 'LU2':
QT = coll.Qmat[1:, 1:].T
[_, _, U] = scipy.linalg.lu(QT, overwrite_a=True)
QDmat[1:, 1:] = 2 * U.T
elif qd_type == 'TRAP':
for m in range(coll.num_nodes + 1):
QDmat[m, 1 : m + 1] = coll.delta_m[0:m]
for m in range(coll.num_nodes + 1):
QDmat[m, 0:m] += coll.delta_m[0:m]
QDmat /= 2.0
elif qd_type == 'IE':
for m in range(coll.num_nodes + 1):
QDmat[m, 1 : m + 1] = coll.delta_m[0:m]
elif qd_type == 'IEpar':
for m in range(coll.num_nodes + 1):
QDmat[m, m] = np.sum(coll.delta_m[0:m])
self.parallelizable = True
elif qd_type == 'Qpar':
QDmat = np.diag(np.diag(coll.Qmat))
self.parallelizable = True
elif qd_type == 'GS':
QDmat = np.tril(coll.Qmat)
elif qd_type == 'PIC':
QDmat = np.zeros(coll.Qmat.shape)
self.parallelizable = True
elif qd_type == 'MIN':
m = QDmat.shape[0] - 1
x0 = 10 * np.ones(m)
d = opt.minimize(rho, x0, method='Nelder-Mead')
QDmat[1:, 1:] = np.linalg.inv(np.diag(d.x))
self.parallelizable = True
elif qd_type == 'MIN_GT':
m = QDmat.shape[0] - 1
QDmat[1:, 1:] = np.diag(coll.nodes) / m
elif qd_type == 'MIN3':
m = QDmat.shape[0] - 1
x = None
# These values have been obtained using Indie Solver, a commercial solver for black-box optimization which
# aggregates several state-of-the-art optimization methods (free academic subscription plan)
# objective function: sum over 17^2 values of lamdt, real and imaginary (WORKS SURPRISINGLY WELL!)
if coll.node_type == 'LEGENDRE' and coll.quad_type == 'LOBATTO':
if m == 9:
# rho = 0.154786693955
x = [
0.0,
0.14748983547536937,
0.1243753767395874,
0.08797965969063823,
0.03249792877433364,
0.06171633442251176,
0.08995295998705832,
0.1080641868728824,
0.11621787232558443,
]
elif m == 7:
# rho = 0.0979351256833
x = [
0.0,
0.18827968699454273,
0.1307213945012976,
0.04545003319140543,
0.08690617895312261,
0.12326429119922168,
0.13815746843252427,
]
elif m == 5:
# rho = 0.0513543155235
x = [0.0, 0.2994085231050721, 0.07923154575177252, 0.14338847088077, 0.17675509273708057]
elif m == 4:
# rho = 0.0381589713397
x = [0.0, 0.2865524188780046, 0.11264992497015984, 0.2583063168320655]
elif m == 3:
# rho = 0.013592619664
x = [0.0, 0.2113181799416633, 0.3943250920445912]
elif m == 2:
# rho = 0
x = [0.0, 0.5]
                else:
                    raise NotImplementedError(
                        'This combination of preconditioner, node type and node number is not implemented'
                    )
elif coll.node_type == 'LEGENDRE' and coll.quad_type == 'RADAU-RIGHT':
if m == 9:
# rho = 0.151784861385
x = [
0.14208076083211416,
0.1288153963623986,
0.10608601069476883,
0.07509520272252024,
0.027986167728305308,
0.05351160749903067,
0.07911315989747868,
0.09514844658836666,
0.10204992319487571,
]
elif m == 7:
# rho = 0.116400161888
x = [
0.15223871397682717,
0.12625448001038536,
0.08210714764924298,
0.03994434742760019,
0.1052662547386142,
0.14075805578834127,
0.15636085758812895,
]
elif m == 5:
# rho = 0.0783352996958 (iteration 5355)
x = [
0.2818591930905709,
0.2011358490453793,
0.06274536689514164,
0.11790265267514095,
0.1571629578515223,
]
elif m == 4:
# rho = 0.057498908343
x = [0.3198786751412953, 0.08887606314792469, 0.1812366328324738, 0.23273925017954]
elif m == 3:
# rho = 0.038744192979 (iteration 11188)
x = [0.3203856825077055, 0.1399680686269595, 0.3716708461097372]
elif m == 2:
# rho = 0.0208560702294 (iteration 6690)
x = [0.2584092406077449, 0.6449261740461826]
else:
raise NotImplementedError(
'This combination of preconditioner, node type and node number is not implemented'
)
elif coll.node_type == 'EQUID' and coll.quad_type == 'RADAU-RIGHT':
if m == 9:
# rho = 0.251820022583 (iteration 32402)
x = [
0.04067333763109274,
0.06893408176924318,
0.0944460427779633,
0.11847528720123894,
0.14153236351607695,
0.1638856774260845,
0.18569759470199648,
0.20707543960267513,
0.2280946565716198,
]
elif m == 7:
# rho = 0.184582997611 (iteration 44871)
x = [
0.0582690792096515,
0.09937620459067688,
0.13668728443669567,
0.1719458323664216,
0.20585615258818232,
0.2387890485242656,
0.27096908017041393,
]
elif m == 5:
# rho = 0.118441339197 (iteration 34581)
x = [
0.0937126798932547,
0.1619131388001843,
0.22442341539247537,
0.28385142992912565,
0.3412523013467262,
]
elif m == 4:
# rho = 0.0844043254542 (iteration 33099)
x = [0.13194852204686872, 0.2296718892453916, 0.3197255970017318, 0.405619746972393]
elif m == 3:
# rho = 0.0504635143866 (iteration 9783)
x = [0.2046955744931575, 0.3595744268324041, 0.5032243650307717]
elif m == 2:
# rho = 0.0214806480623 (iteration 6109)
x = [0.3749891032632652, 0.6666472946796036]
                else:
                    raise NotImplementedError(
                        'This combination of preconditioner, node type and node number is not implemented'
                    )
            else:
                raise NotImplementedError(
                    'This combination of preconditioner, node type and node number is not implemented'
                )
QDmat[1:, 1:] = np.diag(x)
self.parallelizable = True
else:
raise NotImplementedError(f'qd_type implicit "{qd_type}" not implemented')
# check if we got not more than a lower triangular matrix
np.testing.assert_array_equal(
np.triu(QDmat, k=1), np.zeros(QDmat.shape), err_msg='Lower triangular matrix expected!'
)
return QDmat
    def get_Qdelta_explicit(self, coll, qd_type):
        """Compute a strictly lower-triangular approximation QDelta of the collocation matrix Q for explicit sweeps."""
        QDmat = np.zeros(coll.Qmat.shape)
if qd_type == 'EE':
for m in range(self.coll.num_nodes + 1):
QDmat[m, 0:m] = self.coll.delta_m[0:m]
elif qd_type == 'GS':
QDmat = np.tril(self.coll.Qmat, k=-1)
elif qd_type == 'PIC':
QDmat = np.zeros(coll.Qmat.shape)
else:
raise NotImplementedError('qd_type explicit not implemented')
# check if we got not more than a lower triangular matrix
np.testing.assert_array_equal(
np.triu(QDmat, k=0), np.zeros(QDmat.shape), err_msg='Strictly lower triangular matrix expected!'
)
return QDmat
def predict(self):
"""
Predictor to fill values at nodes before first sweep
Default prediction for the sweepers, only copies the values to all collocation nodes
and evaluates the RHS of the ODE there
"""
# get current level and problem description
L = self.level
P = L.prob
# evaluate RHS at left point
L.f[0] = P.eval_f(L.u[0], L.time)
for m in range(1, self.coll.num_nodes + 1):
# copy u[0] to all collocation nodes, evaluate RHS
if self.params.initial_guess == 'spread':
L.u[m] = P.dtype_u(L.u[0])
L.f[m] = P.eval_f(L.u[m], L.time + L.dt * self.coll.nodes[m - 1])
# start with zero everywhere
elif self.params.initial_guess == 'zero':
L.u[m] = P.dtype_u(init=P.init, val=0.0)
L.f[m] = P.dtype_f(init=P.init, val=0.0)
# start with random initial guess
elif self.params.initial_guess == 'random':
L.u[m] = P.dtype_u(init=P.init, val=self.rng.rand(1)[0])
L.f[m] = P.dtype_f(init=P.init, val=self.rng.rand(1)[0])
else:
raise ParameterError(f'initial_guess option {self.params.initial_guess} not implemented')
# indicate that this level is now ready for sweeps
L.status.unlocked = True
L.status.updated = True
def compute_residual(self, stage=None):
"""
Computation of the residual using the collocation matrix Q
Args:
stage (str): The current stage of the step the level belongs to
"""
# get current level and problem description
L = self.level
# Check if we want to skip the residual computation to gain performance
# Keep in mind that skipping any residual computation is likely to give incorrect outputs of the residual!
if stage in self.params.skip_residual_computation:
L.status.residual = 0.0 if L.status.residual is None else L.status.residual
return None
# check if there are new values (e.g. from a sweep)
# assert L.status.updated
# compute the residual for each node
# build QF(u)
res_norm = []
res = self.integrate()
for m in range(self.coll.num_nodes):
res[m] += L.u[0] - L.u[m + 1]
# add tau if associated
if L.tau[m] is not None:
res[m] += L.tau[m]
# use abs function from data type here
res_norm.append(abs(res[m]))
# find maximal residual over the nodes
if L.params.residual_type == 'full_abs':
L.status.residual = max(res_norm)
elif L.params.residual_type == 'last_abs':
L.status.residual = res_norm[-1]
elif L.params.residual_type == 'full_rel':
L.status.residual = max(res_norm) / abs(L.u[0])
elif L.params.residual_type == 'last_rel':
L.status.residual = res_norm[-1] / abs(L.u[0])
else:
raise ParameterError(
f'residual_type = {L.params.residual_type} not implemented, choose '
f'full_abs, last_abs, full_rel or last_rel instead'
)
# indicate that the residual has seen the new values
L.status.updated = False
return None
def compute_end_point(self):
"""
Abstract interface to end-node computation
"""
raise NotImplementedError('ERROR: sweeper has to implement compute_end_point(self)')
def integrate(self):
"""
Abstract interface to right-hand side integration
"""
raise NotImplementedError('ERROR: sweeper has to implement integrate(self)')
def update_nodes(self):
"""
Abstract interface to node update
"""
raise NotImplementedError('ERROR: sweeper has to implement update_nodes(self)')
@property
def level(self):
"""
Returns the current level
Returns:
pySDC.Level.level: the current level
"""
return self.__level
@level.setter
def level(self, L):
"""
Sets a reference to the current level (done in the initialization of the level)
Args:
L (pySDC.Level.level): current level
"""
assert isinstance(L, level)
self.__level = L
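# Minimal sketch (runs only when this file is executed directly): the base sweeper is abstract, but the QDelta
# helpers only need the collocation object, so they can be inspected in isolation. The parameter values below
# are illustrative, not prescribed defaults.
if __name__ == '__main__':
    sw = sweeper({'num_nodes': 3, 'quad_type': 'RADAU-RIGHT'})
    print(sw.get_Qdelta_implicit(sw.coll, 'IE'))  # implicit Euler preconditioner (lower triangular)
    print(sw.get_Qdelta_explicit(sw.coll, 'EE'))  # explicit Euler preconditioner (strictly lower triangular)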
| 16,446 | 37.607981 | 118 | py |
pySDC | pySDC-master/pySDC/core/ConvergenceController.py | import logging
from pySDC.helpers.pysdc_helper import FrozenClass
# short helper class to add params as attributes
class Pars(FrozenClass):
def __init__(self, params):
self.control_order = 0 # integer that determines the order in which the convergence controllers are called
self.useMPI = None # depends on the controller
for k, v in params.items():
setattr(self, k, v)
self._freeze()
# short helper class to store status variables
class Status(FrozenClass):
"""
Initialize status variables with None, since at the time of instantiation of the convergence controllers, not all
    relevant information about the controller is known.
"""
    def __init__(self, status_variables):
        [setattr(self, key, None) for key in status_variables]
self._freeze()
class ConvergenceController(object):
"""
Base abstract class for convergence controller, which is plugged into the controller to determine the iteration
count and time step size.
"""
def __init__(self, controller, params, description, **kwargs):
"""
Initialization routine
Args:
controller (pySDC.Controller): The controller
params (dict): The params passed for this specific convergence controller
description (dict): The description object used to instantiate the controller
"""
self.params = Pars(self.setup(controller, params, description))
params_ok, msg = self.check_parameters(controller, params, description)
assert params_ok, msg
self.dependencies(controller, description)
self.logger = logging.getLogger(f"{type(self).__name__}")
def log(self, msg, S, level=15, **kwargs):
"""
Shortcut that has a default level for the logger. 15 is above debug but below info.
Args:
msg (str): Message you want to log
S (pySDC.step): The current step
level (int): the level passed to the logger
Returns:
None
"""
self.logger.log(level, f'Process {S.status.slot:2d} on time {S.time:.6f} - {msg}')
return None
def setup(self, controller, params, description, **kwargs):
"""
Setup various variables that only need to be set once in the beginning.
If the convergence controller is added automatically, you can give it params by adding it manually.
It will be instantiated only once with the manually supplied parameters overriding automatically added
parameters.
This function scans the convergence controllers supplied to the description object for instances of itself.
This corresponds to the convergence controller being added manually by the user. If something is found, this
function will then return a composite dictionary from the `params` passed to this function as well as the
`params` passed manually, with priority to manually added parameters. If you added the convergence controller
manually, that is of course the same and nothing happens. If, on the other hand, the convergence controller was
added automatically, the `params` passed here will come from whatever added it and you can now override
parameters by adding the convergence controller manually.
        This relies on child classes to return a composite dictionary from their defaults and from the result of this
function, so you should write
```
return {**defaults, **super().setup(controller, params, description, **kwargs)}
```
when overloading this method in a child class, with `defaults` a dictionary containing default parameters.
Args:
controller (pySDC.Controller): The controller
params (dict): The params passed for this specific convergence controller
description (dict): The description object used to instantiate the controller
Returns:
(dict): The updated params dictionary after setup
"""
# allow to change parameters by adding the convergence controller manually
return {**params, **description.get('convergence_controllers', {}).get(type(self), {})}
def dependencies(self, controller, description, **kwargs):
"""
Load dependencies on other convergence controllers here.
Args:
controller (pySDC.Controller): The controller
description (dict): The description object used to instantiate the controller
Returns:
None
"""
pass
def check_parameters(self, controller, params, description, **kwargs):
"""
Check whether parameters are compatible with whatever assumptions went into the step size functions etc.
Args:
controller (pySDC.Controller): The controller
params (dict): The params passed for this specific convergence controller
description (dict): The description object used to instantiate the controller
Returns:
bool: Whether the parameters are compatible
str: The error message
"""
return True, ""
def check_iteration_status(self, controller, S, **kwargs):
"""
Determine whether to keep iterating or not in this function.
Args:
controller (pySDC.Controller): The controller
S (pySDC.Step): The current step
Returns:
None
"""
pass
def get_new_step_size(self, controller, S, **kwargs):
"""
        This function allows setting a step size based on arbitrary criteria.
Make sure to give an order to the convergence controller by setting the `control_order` variable in the params.
This variable is an integer and you can see what the current order is by using
`controller.print_convergence_controllers()`.
Args:
controller (pySDC.Controller): The controller
S (pySDC.Step): The current step
Returns:
None
"""
pass
def determine_restart(self, controller, S, **kwargs):
"""
Determine for each step separately if it wants to be restarted for whatever reason.
Args:
controller (pySDC.Controller): The controller
S (pySDC.Step): The current step
Returns:
None
"""
pass
def reset_status_variables(self, controller, **kwargs):
"""
Reset status variables.
This is called in the `restart_block` function.
Args:
controller (pySDC.Controller): The controller
Returns:
None
"""
return None
def setup_status_variables(self, controller, **kwargs):
"""
Setup status variables.
This is not done at the time of instantiation, since the controller is not fully instantiated at that time and
        hence not all information is available. Instead, this function is called after the controller has been fully
instantiated.
Args:
controller (pySDC.Controller): The controller
Returns:
None
"""
return None
def reset_buffers_nonMPI(self, controller, **kwargs):
"""
Buffers refer to variables used across multiple steps that are stored in the convergence controller classes to
imitate communication in non MPI versions. These have to be reset in order to replicate availability of
variables in MPI versions.
For instance, if step 0 sets self.buffers.x = 1 from self.buffers.x = 0, when the same MPI rank uses the
variable with step 1, it will still carry the value of self.buffers.x = 1, equivalent to a send from the rank
computing step 0 to the rank computing step 1.
However, you can only receive what somebody sent and in order to make sure that is true for the non MPI
versions, we reset after each iteration so you cannot use this function to communicate backwards from the last
step to the first one for instance.
This function is called both at the end of instantiating the controller, as well as after each iteration.
Args:
controller (pySDC.Controller): The controller
Returns:
None
"""
pass
def pre_iteration_processing(self, controller, S, **kwargs):
"""
Do whatever you want to before each iteration here.
Args:
controller (pySDC.Controller): The controller
S (pySDC.Step): The current step
Returns:
None
"""
pass
def post_iteration_processing(self, controller, S, **kwargs):
"""
Do whatever you want to after each iteration here.
Args:
controller (pySDC.Controller): The controller
S (pySDC.Step): The current step
Returns:
None
"""
pass
def post_step_processing(self, controller, S, **kwargs):
"""
Do whatever you want to after each step here.
Args:
controller (pySDC.Controller): The controller
S (pySDC.Step): The current step
Returns:
None
"""
pass
def prepare_next_block(self, controller, S, size, time, Tend, **kwargs):
"""
Prepare stuff like spreading step sizes or whatever.
Args:
controller (pySDC.Controller): The controller
S (pySDC.Step): The current step
size (int): The number of ranks
            time (float): The current time (will be a list in the nonMPI controller implementation)
Tend (float): The final time
Returns:
None
"""
pass
def convergence_control(self, controller, S, **kwargs):
"""
Call all the functions related to convergence control.
This is called in `it_check` in the controller after every iteration just after `post_iteration_processing`.
Args:
controller (pySDC.Controller): The controller
S (pySDC.Step): The current step
Returns:
None
"""
self.get_new_step_size(controller, S, **kwargs)
self.determine_restart(controller, S, **kwargs)
self.check_iteration_status(controller, S, **kwargs)
return None
def post_spread_processing(self, controller, S, **kwargs):
"""
This function is called at the end of the `SPREAD` stage in the controller
Args:
controller (pySDC.Controller): The controller
S (pySDC.Step): The current step
"""
pass
def send(self, comm, dest, data, blocking=False, **kwargs):
"""
Send data to a different rank
Args:
comm (mpi4py.MPI.Intracomm): Communicator
dest (int): The target rank
data: Data to be sent
blocking (bool): Whether the communication is blocking or not
Returns:
request handle of the communication
"""
# log what's happening for debug purposes
self.logger.debug(f'Step {comm.rank} initiates send to step {dest}')
kwargs['tag'] = kwargs.get('tag', abs(self.params.control_order))
if blocking:
req = comm.send(data, dest=dest, **kwargs)
else:
req = comm.isend(data, dest=dest, **kwargs)
# log what's happening for debug purposes
self.logger.debug(f'Step {comm.rank} leaves send to step {dest}')
return req
def recv(self, comm, source, **kwargs):
"""
Receive some data
Args:
comm (mpi4py.MPI.Intracomm): Communicator
source (int): Where to look for receiving
Returns:
whatever has been received
"""
# log what's happening for debug purposes
self.logger.debug(f'Step {comm.rank} initiates receive from step {source}')
kwargs['tag'] = kwargs.get('tag', abs(self.params.control_order))
data = comm.recv(source=source, **kwargs)
# log what's happening for debug purposes
self.logger.debug(f'Step {comm.rank} leaves receive from step {source}')
return data
def reset_variable(self, controller, name, MPI=False, place=None, where=None, init=None):
"""
Utility function for resetting variables. This function will call the `add_variable` function with all the same
arguments, but with `allow_overwrite = True`.
Args:
controller (pySDC.Controller): The controller
name (str): The name of the variable
MPI (bool): Whether to use MPI controller
place (object): The object you want to reset the variable of
where (list): List of strings containing a path to where you want to reset the variable
init: Initial value of the variable
Returns:
None
"""
self.add_variable(controller, name, MPI, place, where, init, allow_overwrite=True)
def add_variable(self, controller, name, MPI=False, place=None, where=None, init=None, allow_overwrite=False):
"""
Add a variable to a frozen class.
This function goes through the path to the destination of the variable recursively and adds it to all instances
that are possible in the path. For example, giving `where = ["MS", "levels", "status"]` will result in adding a
variable to the status object of all levels of all steps of the controller.
Part of the functionality of the frozen class is to separate initialization and setting of variables. By
enforcing this, you can make sure not to overwrite already existing variables. Since this function is called
outside of the `__init__` function of the status objects, this can otherwise lead to bugs that are hard to find.
For this reason, you need to specifically set `allow_overwrite = True` if you want to forgo the check if the
variable already exists. This can be useful when resetting variables between steps, but make sure to set it to
`allow_overwrite = False` the first time you add a variable.
Args:
controller (pySDC.Controller): The controller
name (str): The name of the variable
MPI (bool): Whether to use MPI controller
place (object): The object you want to add the variable to
where (list): List of strings containing a path to where you want to add the variable
init: Initial value of the variable
allow_overwrite (bool): Allow overwriting the variables if they already exist or raise an exception
Returns:
None
"""
where = ["S" if MPI else "MS", "levels", "status"] if where is None else where
place = controller if place is None else place
# check if we have arrived at the end of the path to the variable
if len(where) == 0:
            variable_exists = name in place.__dict__.keys()
            # check if the variable already exists and raise an error in case we are about to introduce a bug
            if not allow_overwrite and variable_exists:
                raise ValueError(f"Key \"{name}\" already exists in {place}! Please rename the variable in {self}")
            # if we allow overwriting, but the variable does not exist already, we are violating the intended purpose
            # of this function, so we also raise an error if someone should be so mad as to attempt this
            elif allow_overwrite and not variable_exists:
raise ValueError(f"Key \"{name}\" is supposed to be overwritten in {place}, but it does not exist!")
# actually add or overwrite the variable
place.__dict__[name] = init
# follow the path to the final destination recursively
else:
# get all possible new places to continue the path
new_places = place.__dict__[where[0]]
# continue all possible paths
if type(new_places) == list:
# loop through all possibilities
for new_place in new_places:
self.add_variable(
controller,
name,
MPI=MPI,
place=new_place,
where=where[1:],
init=init,
allow_overwrite=allow_overwrite,
)
else:
                # only one possible place, go there directly
self.add_variable(
controller,
name,
MPI=MPI,
place=new_places,
where=where[1:],
init=init,
allow_overwrite=allow_overwrite,
)
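# Small sketch (runs only when this file is executed directly) of the two helper containers defined at the top
# of this module: Status pre-registers the given names with None and then freezes, so later typos raise instead
# of silently adding new fields. The variable names used here are illustrative only.
if __name__ == '__main__':
    status = Status(['restart', 'converged'])
    status.restart = False  # allowed, since 'restart' was registered above
    print(status.restart, status.converged)  # False None
    pars = Pars({'control_order': 100})
    print(pars.control_order, pars.useMPI)  # 100 None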
| 17,029 | 37.355856 | 120 | py |
pySDC | pySDC-master/pySDC/core/Collocation.py | import logging
import numpy as np
from pySDC.core.Nodes import NodesGenerator
from pySDC.core.Errors import CollocationError
from pySDC.core.Lagrange import LagrangeApproximation
class CollBase(object):
"""
Generic collocation class, that contains everything to do integration over
intervals and between nodes.
    It can be used to produce many kinds of quadrature nodes from various
    distributions (awesome!).
It is based on the two main parameters that define the nodes :
- node_type : the node distribution used for the collocation method
    - quad_type : the type of quadrature used (inclusion or not of the boundary points)
Current implementation provides the following available parameter values
for node_type :
- EQUID : equidistant node distribution
- LEGENDRE : distribution from Legendre polynomials
- CHEBY-{1,2,3,4} : distribution from Chebyshev polynomials of a given kind
    The type of quadrature can be GAUSS (only inner nodes), RADAU-LEFT
(inclusion of the left boundary), RADAU-RIGHT (inclusion of the right
boundary) and LOBATTO (inclusion of left and right boundary).
    Here is the equivalence table with the (old) original classes implemented
    in pySDC:
+-------------------------+-----------+-------------+
| Original Class | node_type | quad_type |
+=========================+===========+=============+
| Equidistant | EQUID | LOBATTO |
+-------------------------+-----------+-------------+
| EquidistantInner | EQUID | GAUSS |
+-------------------------+-----------+-------------+
| EquidistantNoLeft | EQUID | RADAU-RIGHT |
+-------------------------+-----------+-------------+
| CollGaussLegendre | LEGENDRE | GAUSS |
+-------------------------+-----------+-------------+
| CollGaussLobatto | LEGENDRE | LOBATTO |
+-------------------------+-----------+-------------+
| CollGaussRadau_Left | LEGENDRE | RADAU-LEFT |
+-------------------------+-----------+-------------+
| CollGaussRadau_Right | LEGENDRE | RADAU-RIGHT |
+-------------------------+-----------+-------------+
Attributes:
num_nodes (int): number of collocation nodes
tleft (float): left interval point
tright (float): right interval point
nodes (numpy.ndarray): array of quadrature nodes
weights (numpy.ndarray): array of quadrature weights for the full interval
Qmat (numpy.ndarray): matrix containing the weights for tleft to node
Smat (numpy.ndarray): matrix containing the weights for node to node
delta_m (numpy.ndarray): array of distances between nodes
right_is_node (bool): flag to indicate whether right point is collocation node
left_is_node (bool): flag to indicate whether left point is collocation node
"""
def __init__(self, num_nodes=None, tleft=0, tright=1, node_type='LEGENDRE', quad_type=None, **kwargs):
"""
Initialization routine for a collocation object
Args:
num_nodes (int): number of collocation nodes
tleft (float): left interval point
tright (float): right interval point
"""
if not num_nodes > 0:
raise CollocationError('At least one quadrature node required, got %s' % num_nodes)
if not tleft < tright:
raise CollocationError('Interval boundaries are corrupt, got %s and %s' % (tleft, tright))
self.logger = logging.getLogger('collocation')
# Set number of nodes, left and right interval boundaries
self.num_nodes = num_nodes
self.tleft = tleft
self.tright = tright
self.node_type = node_type
self.quad_type = quad_type
# Instantiate attributes
self.nodeGenerator = NodesGenerator(self.node_type, self.quad_type)
if self.node_type == 'EQUID':
self.order = num_nodes
else:
if self.quad_type == 'GAUSS':
self.order = 2 * num_nodes
elif self.quad_type.startswith('RADAU'):
self.order = 2 * num_nodes - 1
elif self.quad_type == 'LOBATTO':
self.order = 2 * num_nodes - 2
self.left_is_node = self.quad_type in ['LOBATTO', 'RADAU-LEFT']
self.right_is_node = self.quad_type in ['LOBATTO', 'RADAU-RIGHT']
self.nodes = self._getNodes
self.weights = self._getWeights(tleft, tright)
self.Qmat = self._gen_Qmatrix
self.Smat = self._gen_Smatrix
self.delta_m = self._gen_deltas
@staticmethod
def evaluate(weights, data):
"""
Evaluates the quadrature over the full interval
Args:
weights (numpy.ndarray): array of quadrature weights for the full interval
data (numpy.ndarray): f(x) to be integrated
Returns:
numpy.ndarray: integral over f(x) between tleft and tright
"""
if not np.size(weights) == np.size(data):
raise CollocationError("Input size does not match number of weights, but is %s" % np.size(data))
return np.dot(weights, data)
def _getWeights(self, a, b):
"""
Computes weights using barycentric interpolation
Args:
a (float): left interval boundary
b (float): right interval boundary
Returns:
numpy.ndarray: weights of the collocation formula given by the nodes
"""
if self.nodes is None:
raise CollocationError(f"Need nodes before computing weights, got {self.nodes}")
# Instantiate the Lagrange interpolator object
approx = LagrangeApproximation(self.nodes)
# Compute weights
tLeft = np.ravel(self.tleft)[0]
tRight = np.ravel(self.tright)[0]
weights = approx.getIntegrationMatrix([(tLeft, tRight)], numQuad='FEJER')
return np.ravel(weights)
@property
def _getNodes(self):
"""
Computes nodes using an internal NodesGenerator object
Returns:
            np.ndarray: array of collocation nodes for the chosen node and quadrature type, scaled to [tleft, tright]
"""
# Generate normalized nodes in [-1, 1]
nodes = self.nodeGenerator.getNodes(self.num_nodes)
# Scale nodes to [tleft, tright]
a = self.tleft
b = self.tright
nodes += 1.0
nodes /= 2.0
nodes *= b - a
nodes += a
if self.left_is_node:
nodes[0] = self.tleft
if self.right_is_node:
nodes[-1] = self.tright
return nodes
@property
def _gen_Qmatrix(self):
"""
Compute tleft-to-node integration matrix for later use in collocation formulation
Returns:
numpy.ndarray: matrix containing the weights for tleft to node
"""
if self.nodes is None:
            raise CollocationError(f"Need nodes before computing the Q matrix, got {self.nodes}")
M = self.num_nodes
Q = np.zeros([M + 1, M + 1])
# Instantiate the Lagrange interpolator object
approx = LagrangeApproximation(self.nodes)
# Compute tleft-to-node integration matrix
tLeft = np.ravel(self.tleft)[0]
intervals = [(tLeft, tau) for tau in self.nodes]
intQ = approx.getIntegrationMatrix(intervals, numQuad='FEJER')
# Store into Q matrix
Q[1:, 1:] = intQ
return Q
@property
def _gen_Smatrix(self):
"""
Compute node-to-node integration matrix for later use in collocation formulation
Returns:
numpy.ndarray: matrix containing the weights for node to node
"""
M = self.num_nodes
Q = self.Qmat
S = np.zeros([M + 1, M + 1])
S[1, :] = Q[1, :]
for m in np.arange(2, M + 1):
S[m, :] = Q[m, :] - Q[m - 1, :]
return S
@property
def _gen_deltas(self):
"""
Compute distances between the nodes
Returns:
numpy.ndarray: distances between the nodes
"""
M = self.num_nodes
delta = np.zeros(M)
delta[0] = self.nodes[0] - self.tleft
for m in np.arange(1, M):
delta[m] = self.nodes[m] - self.nodes[m - 1]
return delta
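# Minimal usage sketch (runs only when this file is executed directly): three Radau-Right nodes on [0, 1] give
# a quadrature rule that integrates the cubic below exactly. The parameter values are illustrative only.
if __name__ == '__main__':
    coll = CollBase(num_nodes=3, tleft=0, tright=1, quad_type='RADAU-RIGHT')
    print(coll.nodes)  # three nodes in (0, 1], the right boundary included
    approx = CollBase.evaluate(coll.weights, coll.nodes**3)
    print(abs(approx - 0.25))  # close to machine precision, since the exact integral of t^3 over [0, 1] is 1/4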
| 8,334 | 34.021008 | 108 | py |
pySDC | pySDC-master/pySDC/core/Common.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description
-----------
Module containing utility class(es) from which some of the pySDC base classes
inherit.
"""
from pySDC.core.Errors import ReadOnlyError
class _MetaRegisterParams(type):
"""Metaclass for RegisterParams base class"""
def __new__(cls, name, bases, dct):
obj = super().__new__(cls, name, bases, dct)
obj._parNamesReadOnly = set()
obj._parNames = set()
return obj
class RegisterParams(metaclass=_MetaRegisterParams):
"""
Base class to register parameters.
Attributes
----------
params : dict (property)
Dictionary containing names and values of registered parameters.
_parNames : set of str
Names of all the registered parameters.
_parNamesReadOnly : set of str
Names of all the parameters registered as read-only.
"""
def _makeAttributeAndRegister(self, *names, localVars=None, readOnly=False):
"""
Register a list of attribute name as parameters of the class.
Parameters
----------
*names : list of str
The name of the parameters to be registered (should be class attributes).
localVars : dict
            Dictionary mapping each name given in names to its parameter value.
            Can be provided, for instance, using the `locals()` built-in
            dictionary. MUST be provided as soon as names contains anything.
readOnly : bool, optional
            Whether or not to store the parameters as read-only attributes
"""
if len(names) > 1 and localVars is None:
raise ValueError("a dictionary must be provided in localVars with parameters values")
# Set parameters as attributes
for name in names:
try:
super().__setattr__(name, localVars[name])
except KeyError: # pragma: no cover
raise ValueError(f'value for {name} not given in localVars')
# Register as class parameter
if readOnly:
self._parNamesReadOnly = self._parNamesReadOnly.union(names)
else:
self._parNames = self._parNames.union(names)
@property
def params(self):
"""Dictionary containing names and values of registered parameters"""
return {name: getattr(self, name) for name in self._parNamesReadOnly.union(self._parNames)}
def __setattr__(self, name, value):
if name in self._parNamesReadOnly:
raise ReadOnlyError(name)
super().__setattr__(name, value)
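# Hypothetical subclass sketch (runs only when this file is executed directly) showing how parameters are
# registered; the class name and parameters below are illustrative and not part of pySDC.
if __name__ == '__main__':
    class _DemoProblem(RegisterParams):
        def __init__(self, nvars, nu):
            self._makeAttributeAndRegister('nvars', 'nu', localVars=locals(), readOnly=True)
    demo = _DemoProblem(nvars=64, nu=0.1)
    print(demo.params)  # {'nvars': 64, 'nu': 0.1} (order may vary)
    try:
        demo.nu = 1.0  # read-only parameters cannot be overwritten
    except ReadOnlyError as err:
        print(err)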
| 2,606 | 33.302632 | 99 | py |
pySDC | pySDC-master/pySDC/core/Level.py | from pySDC.helpers.pysdc_helper import FrozenClass
# short helper class to add params as attributes
class _Pars(FrozenClass):
def __init__(self, params):
self.dt = None
self.dt_initial = None
self.restol = -1.0
self.nsweeps = 1
self.residual_type = 'full_abs'
for k, v in params.items():
setattr(self, k, v)
# freeze class, no further attributes allowed from this point
self._freeze()
self.dt_initial = self.dt * 1.0
# short helper class to bundle all status variables
class _Status(FrozenClass):
"""
This class carries the status of the level. All variables that the core SDC / PFASST functionality depend on are
initialized here, while the convergence controllers are allowed to add more variables in a controlled fashion
later on using the `add_variable` function.
"""
def __init__(self):
self.residual = None
self.unlocked = False
self.updated = False
self.time = None
self.dt_new = None
self.sweep = None
# freeze class, no further attributes allowed from this point
self._freeze()
class level(FrozenClass):
"""
Level class containing all management functionality for a single level
A level contains all data structures, types and objects to perform sweeps on this particular level. It does not
know about other levels.
Attributes:
params (__Pars): parameter object containing the custom parameters passed by the user
status (__Status): status object
level_index (int): custom string naming this level
uend: dof values at the right end point of the interval
u (list of dtype_u): dof values at the nodes
        uold (list of dtype_u): copy of dof values for saving data during restriction
f (list of dtype_f): RHS values at the nodes
fold (list of dtype_f): copy of RHS values for saving data during restriction
tau (list of dtype_u): FAS correction, allocated via step class if necessary
"""
def __init__(self, problem_class, problem_params, sweeper_class, sweeper_params, level_params, level_index):
"""
Initialization routine
Args:
problem_class: problem class
problem_params (dict): parameters for the problem to be initialized
sweeper_class: sweeper class
sweeper_params (dict): parameters for the sweeper (contains collocation)
level_params (dict): parameters given by the user, will be added as attributes
level_index (int): custom name for this level
"""
# instantiate sweeper, problem and hooks
self.__sweep = sweeper_class(sweeper_params)
self.__prob = problem_class(**problem_params)
# set level parameters and status
self.params = _Pars(level_params)
self.status = _Status()
# set name
self.level_index = level_index
# empty data at the nodes, the right end point and tau
self.uend = None
self.u = [None] * (self.sweep.coll.num_nodes + 1)
self.uold = [None] * (self.sweep.coll.num_nodes + 1)
self.f = [None] * (self.sweep.coll.num_nodes + 1)
self.fold = [None] * (self.sweep.coll.num_nodes + 1)
self.tau = [None] * self.sweep.coll.num_nodes
# pass this level to the sweeper for easy access
self.sweep.level = self
self.__tag = None
# freeze class, no further attributes allowed from this point
self._freeze()
def reset_level(self, reset_status=True):
"""
Routine to clean-up the level for the next time step
Args:
reset_status (bool): Reset the status or only the solution
Returns:
None
"""
# reset status
if reset_status:
self.status = _Status()
# all data back to None
self.uend = None
self.u = [None] * (self.sweep.coll.num_nodes + 1)
self.uold = [None] * (self.sweep.coll.num_nodes + 1)
self.f = [None] * (self.sweep.coll.num_nodes + 1)
self.fold = [None] * (self.sweep.coll.num_nodes + 1)
self.tau = [None] * self.sweep.coll.num_nodes
@property
def sweep(self):
"""
Getter for the sweeper
Returns:
pySDC.Sweeper.sweeper: the sweeper associated to this level
"""
return self.__sweep
@property
def prob(self):
"""
Getter for the problem
Returns:
pySDC.Problem.ptype: the problem associated to this level
"""
return self.__prob
@property
def time(self):
"""
Meta-getter for the current time
Returns:
float: referencing status time for convenience
"""
return self.status.time
@property
def dt(self):
"""
Meta-getter for the time-step size
Returns:
float: referencing dt from parameters for convenience
"""
return self.params.dt
@property
def tag(self):
"""
Getter for tag
Returns:
tag for sending/receiving
"""
return self.__tag
@tag.setter
def tag(self, t):
"""
Setter for tag
Args:
t: new tag for sending/receiving
"""
self.__tag = t
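# Small sketch (runs only when this file is executed directly) of the level parameter container defined above:
# dt_initial is derived once from dt, while defaults such as nsweeps stay in place. Values are illustrative only.
if __name__ == '__main__':
    pars = _Pars({'dt': 0.1, 'restol': 1e-10})
    print(pars.dt, pars.dt_initial, pars.nsweeps, pars.residual_type)  # 0.1 0.1 1 full_abs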
| 5,436 | 29.038674 | 116 | py |
pySDC | pySDC-master/pySDC/core/Hooks.py | import logging
from collections import namedtuple
Entry = namedtuple('Entry', ['process', 'time', 'level', 'iter', 'sweep', 'type', 'num_restarts'])
# noinspection PyUnusedLocal,PyShadowingBuiltins,PyShadowingNames
class hooks(object):
"""
Hook class to contain the functions called during the controller runs (e.g. for calling user-routines)
When deriving a custom hook from this class make sure to always call the parent method using e.g.
`super().post_step(step, level_number)`. Otherwise bugs may arise when using `filer_recomputed` from the stats
helper for post processing.
Attributes:
logger: logger instance for output
__num_restarts (int): number of restarts of the current step
__stats (dict): dictionary for gathering the statistics of a run
__entry (namedtuple): statistics entry containing all information to identify the value
"""
def __init__(self):
"""
Initialization routine
"""
self.__num_restarts = 0
self.logger = logging.getLogger('hooks')
# create statistics and entry elements
self.__stats = {}
self.__entry = Entry
def add_to_stats(self, process, time, level, iter, sweep, type, value):
"""
Routine to add data to the statistics dict
Args:
process: the current process recording this data
time (float): the current simulation time
level (int): the current level index
iter (int): the current iteration count
sweep (int): the current sweep count
type (str): string to describe the type of value
value: the actual data
"""
# create named tuple for the key and add to dict
self.__stats[
self.__entry(
process=process,
time=time,
level=level,
iter=iter,
sweep=sweep,
type=type,
num_restarts=self.__num_restarts,
)
] = value
def increment_stats(self, process, time, level, iter, sweep, type, value, initialize=None):
"""
        Routine to increment data in the statistics dict. If the entry does not exist yet, it will be initialized to
        initialize if applicable and to value otherwise
Args:
process: the current process recording this data
time (float): the current simulation time
level (int): the current level index
iter (int): the current iteration count
sweep (int): the current sweep count
type (str): string to describe the type of value
value: the actual data
initialize: if supplied and data does not exist already, this will be used over value
"""
key = self.__entry(
process=process, time=time, level=level, iter=iter, sweep=sweep, type=type, num_restarts=self.__num_restarts
)
if key in self.__stats.keys():
self.__stats[key] += value
elif initialize is not None:
self.__stats[key] = initialize
else:
self.__stats[key] = value
def return_stats(self):
"""
Getter for the stats
Returns:
dict: stats
"""
return self.__stats
def reset_stats(self):
"""
Function to reset the stats for multiple runs
"""
self.__stats = {}
def pre_setup(self, step, level_number):
"""
Default routine called before setup starts
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
def pre_run(self, step, level_number):
"""
Default routine called before time-loop starts
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
def pre_predict(self, step, level_number):
"""
Default routine called before predictor starts
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
def pre_step(self, step, level_number):
"""
Hook called before each step
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
def pre_iteration(self, step, level_number):
"""
Default routine called before iteration starts
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
def pre_sweep(self, step, level_number):
"""
Default routine called before sweep starts
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
def pre_comm(self, step, level_number):
"""
Default routine called before communication starts
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
def post_comm(self, step, level_number, add_to_stats=False):
"""
Default routine called after each communication
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
add_to_stats (bool): set if result should go to stats object
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
def post_sweep(self, step, level_number):
"""
Default routine called after each sweep
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
def post_iteration(self, step, level_number):
"""
Default routine called after each iteration
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
def post_step(self, step, level_number):
"""
Default routine called after each step or block
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
def post_predict(self, step, level_number):
"""
Default routine called after each predictor
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
def post_run(self, step, level_number):
"""
Default routine called after each run
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
def post_setup(self, step, level_number):
"""
Default routine called after setup
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0
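# Minimal sketch (runs only when this file is executed directly): the statistics interface can be exercised
# without a running controller by passing plain values for the bookkeeping arguments. All values below are
# illustrative only.
if __name__ == '__main__':
    h = hooks()
    h.add_to_stats(process=0, time=0.0, level=0, iter=1, sweep=1, type='residual', value=1e-3)
    h.increment_stats(process=0, time=0.0, level=0, iter=1, sweep=1, type='work', value=2)
    h.increment_stats(process=0, time=0.0, level=0, iter=1, sweep=1, type='work', value=3)  # same key -> 5
    for key, val in h.return_stats().items():
        print(key.type, val)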
| 8,349 | 33.504132 | 120 | py |
pySDC | pySDC-master/pySDC/core/BaseTransfer.py | import logging
import scipy.sparse as sp
from pySDC.core.Errors import UnlockError
from pySDC.helpers.pysdc_helper import FrozenClass
from pySDC.core.Lagrange import LagrangeApproximation
# short helper class to add params as attributes
class _Pars(FrozenClass):
def __init__(self, pars):
self.finter = False
for k, v in pars.items():
setattr(self, k, v)
self._freeze()
class base_transfer(object):
"""
Standard base_transfer class
Attributes:
        logger: custom logger for transfer-related logging
params(__Pars): parameter object containing the custom parameters passed by the user
fine (pySDC.Level.level): reference to the fine level
coarse (pySDC.Level.level): reference to the coarse level
"""
def __init__(self, fine_level, coarse_level, base_transfer_params, space_transfer_class, space_transfer_params):
"""
Initialization routine
Args:
fine_level (pySDC.Level.level): fine level connected with the base_transfer operations
coarse_level (pySDC.Level.level): coarse level connected with the base_transfer operations
base_transfer_params (dict): parameters for the base_transfer operations
space_transfer_class: class to perform spatial transfer
space_transfer_params (dict): parameters for the space_transfer operations
"""
self.params = _Pars(base_transfer_params)
# set up logger
self.logger = logging.getLogger('transfer')
# just copy by object
self.fine = fine_level
self.coarse = coarse_level
fine_grid = self.fine.sweep.coll.nodes
coarse_grid = self.coarse.sweep.coll.nodes
if len(fine_grid) == len(coarse_grid):
self.Pcoll = sp.eye(len(fine_grid)).toarray()
self.Rcoll = sp.eye(len(fine_grid)).toarray()
else:
self.Pcoll = self.get_transfer_matrix_Q(fine_grid, coarse_grid)
self.Rcoll = self.get_transfer_matrix_Q(coarse_grid, fine_grid)
# set up spatial transfer
self.space_transfer = space_transfer_class(
fine_prob=self.fine.prob, coarse_prob=self.coarse.prob, params=space_transfer_params
)
@staticmethod
def get_transfer_matrix_Q(f_nodes, c_nodes):
"""
Helper routine to quickly define transfer matrices from a coarse set
to a fine set of nodes (fully Lagrangian)
Args:
f_nodes: fine nodes (size nF)
c_nodes: coarse nodes (size nC)
Returns:
matrix containing the interpolation weights (shape (nF, nC))
"""
approx = LagrangeApproximation(c_nodes)
return approx.getInterpolationMatrix(f_nodes)
def restrict(self):
"""
Space-time restriction routine
The routine applies the spatial restriction operator to the fine values on the fine nodes, then reevaluates f
on the coarse level. This is used for the first part of the FAS correction tau via integration. The second part
is the integral over the fine values, restricted to the coarse level. Finally, possible tau corrections on the
fine level are restricted as well.
"""
# get data for easier access
F = self.fine
G = self.coarse
PG = G.prob
SF = F.sweep
SG = G.sweep
# only if the level is unlocked at least by prediction
if not F.status.unlocked:
raise UnlockError('fine level is still locked, cannot use data from there')
# restrict fine values in space
tmp_u = []
for m in range(1, SF.coll.num_nodes + 1):
tmp_u.append(self.space_transfer.restrict(F.u[m]))
# restrict collocation values
G.u[0] = self.space_transfer.restrict(F.u[0])
for n in range(1, SG.coll.num_nodes + 1):
G.u[n] = self.Rcoll[n - 1, 0] * tmp_u[0]
for m in range(1, SF.coll.num_nodes):
G.u[n] += self.Rcoll[n - 1, m] * tmp_u[m]
# re-evaluate f on coarse level
G.f[0] = PG.eval_f(G.u[0], G.time)
for m in range(1, SG.coll.num_nodes + 1):
G.f[m] = PG.eval_f(G.u[m], G.time + G.dt * SG.coll.nodes[m - 1])
# build coarse level tau correction part
tauG = G.sweep.integrate()
# build fine level tau correction part
tauF = F.sweep.integrate()
# restrict fine level tau correction part in space
tmp_tau = []
for m in range(SF.coll.num_nodes):
tmp_tau.append(self.space_transfer.restrict(tauF[m]))
# restrict fine level tau correction part in collocation
tauFG = []
for n in range(1, SG.coll.num_nodes + 1):
tauFG.append(self.Rcoll[n - 1, 0] * tmp_tau[0])
for m in range(1, SF.coll.num_nodes):
tauFG[-1] += self.Rcoll[n - 1, m] * tmp_tau[m]
# build tau correction
for m in range(SG.coll.num_nodes):
G.tau[m] = tauFG[m] - tauG[m]
if F.tau[0] is not None:
# restrict possible tau correction from fine in space
tmp_tau = []
for m in range(SF.coll.num_nodes):
tmp_tau.append(self.space_transfer.restrict(F.tau[m]))
# restrict possible tau correction from fine in collocation
for n in range(SG.coll.num_nodes):
for m in range(SF.coll.num_nodes):
G.tau[n] += self.Rcoll[n, m] * tmp_tau[m]
else:
pass
# save u and rhs evaluations for interpolation
for m in range(1, SG.coll.num_nodes + 1):
G.uold[m] = PG.dtype_u(G.u[m])
G.fold[m] = PG.dtype_f(G.f[m])
# works as a predictor
G.status.unlocked = True
return None
def prolong(self):
"""
Space-time prolongation routine
This routine applies the spatial prolongation routine to the difference between the computed and the restricted
values on the coarse level and then adds this difference to the fine values as coarse correction.
"""
# get data for easier access
F = self.fine
G = self.coarse
PF = F.prob
SF = F.sweep
SG = G.sweep
        # only if the level is unlocked at least by prediction or restriction
if not G.status.unlocked:
raise UnlockError('coarse level is still locked, cannot use data from there')
# build coarse correction
# interpolate values in space first
tmp_u = []
for m in range(1, SG.coll.num_nodes + 1):
tmp_u.append(self.space_transfer.prolong(G.u[m] - G.uold[m]))
# interpolate values in collocation
for n in range(1, SF.coll.num_nodes + 1):
for m in range(SG.coll.num_nodes):
F.u[n] += self.Pcoll[n - 1, m] * tmp_u[m]
# re-evaluate f on fine level
for m in range(1, SF.coll.num_nodes + 1):
F.f[m] = PF.eval_f(F.u[m], F.time + F.dt * SF.coll.nodes[m - 1])
return None
def prolong_f(self):
"""
Space-time prolongation routine w.r.t. the rhs f
This routine applies the spatial prolongation routine to the difference between the computed and the restricted
values on the coarse level and then adds this difference to the fine values as coarse correction.
"""
# get data for easier access
F = self.fine
G = self.coarse
SF = F.sweep
SG = G.sweep
# only of the level is unlocked at least by prediction or restriction
if not G.status.unlocked:
raise UnlockError('coarse level is still locked, cannot use data from there')
# build coarse correction
# interpolate values in space first
tmp_u = []
tmp_f = []
for m in range(1, SG.coll.num_nodes + 1):
tmp_u.append(self.space_transfer.prolong(G.u[m] - G.uold[m]))
tmp_f.append(self.space_transfer.prolong(G.f[m] - G.fold[m]))
# interpolate values in collocation
for n in range(1, SF.coll.num_nodes + 1):
for m in range(SG.coll.num_nodes):
F.u[n] += self.Pcoll[n - 1, m] * tmp_u[m]
F.f[n] += self.Pcoll[n - 1, m] * tmp_f[m]
return None
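# Minimal sketch (runs only when this file is executed directly) of the static collocation transfer helper:
# the interpolation weights from 2 coarse to 3 fine nodes reproduce constants exactly, so every row sums to
# one. The node values below are illustrative only.
if __name__ == '__main__':
    import numpy as np
    f_nodes = np.array([0.15505103, 0.64494897, 1.0])
    c_nodes = np.array([1.0 / 3.0, 1.0])
    P = base_transfer.get_transfer_matrix_Q(f_nodes, c_nodes)
    print(P.shape)  # (3, 2)
    print(np.allclose(P.sum(axis=1), 1.0))  # True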
| 8,395 | 33.838174 | 119 | py |
pySDC | pySDC-master/pySDC/core/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/core/Controller.py | import logging
import os
import sys
import numpy as np
from pySDC.core.BaseTransfer import base_transfer
from pySDC.helpers.pysdc_helper import FrozenClass
from pySDC.implementations.convergence_controller_classes.check_convergence import CheckConvergence
from pySDC.implementations.hooks.default_hook import DefaultHooks
# short helper class to add params as attributes
class _Pars(FrozenClass):
def __init__(self, params):
self.mssdc_jac = True
self.predict_type = None
self.all_to_done = False
self.logger_level = 20
self.log_to_file = False
self.dump_setup = True
self.fname = 'run_pid' + str(os.getpid()) + '.log'
self.use_iteration_estimator = False
for k, v in params.items():
setattr(self, k, v)
self._freeze()
class controller(object):
"""
Base abstract controller class
"""
def __init__(self, controller_params, description, useMPI=None):
"""
Initialization routine for the base controller
Args:
controller_params (dict): parameter set for the controller and the steps
"""
self.useMPI = useMPI
# check if we have a hook on this list. If not, use default class.
self.__hooks = []
hook_classes = [DefaultHooks]
user_hooks = controller_params.get('hook_class', [])
hook_classes += user_hooks if type(user_hooks) == list else [user_hooks]
[self.add_hook(hook) for hook in hook_classes]
controller_params['hook_class'] = controller_params.get('hook_class', hook_classes)
for hook in self.hooks:
hook.pre_setup(step=None, level_number=None)
self.params = _Pars(controller_params)
self.__setup_custom_logger(self.params.logger_level, self.params.log_to_file, self.params.fname)
self.logger = logging.getLogger('controller')
if self.params.use_iteration_estimator and self.params.all_to_done:
self.logger.warning('all_to_done and use_iteration_estimator set, will ignore all_to_done')
self.base_convergence_controllers = [CheckConvergence]
self.setup_convergence_controllers(description)
@staticmethod
def __setup_custom_logger(level=None, log_to_file=None, fname=None):
"""
Helper function to set main parameters for the logging facility
Args:
level (int): level of logging
log_to_file (bool): flag to turn on/off logging to file
            fname (str): name of the log file (used if log_to_file is True)
"""
assert type(level) is int
# specify formats and handlers
if log_to_file:
file_formatter = logging.Formatter(
fmt='%(asctime)s - %(name)s - %(module)s - %(funcName)s - %(lineno)d - %(levelname)s: %(message)s'
)
if os.path.isfile(fname):
file_handler = logging.FileHandler(fname, mode='a')
else:
file_handler = logging.FileHandler(fname, mode='w')
file_handler.setFormatter(file_formatter)
else:
file_handler = None
std_formatter = logging.Formatter(fmt='%(name)s - %(levelname)s: %(message)s')
std_handler = logging.StreamHandler(sys.stdout)
std_handler.setFormatter(std_formatter)
# instantiate logger
logger = logging.getLogger('')
# remove handlers from previous calls to controller
for handler in logger.handlers[:]:
logger.removeHandler(handler)
logger.setLevel(level)
logger.addHandler(std_handler)
if log_to_file:
logger.addHandler(file_handler)
else:
pass
def add_hook(self, hook):
"""
Add a hook to the controller which will be called in addition to all other hooks whenever something happens.
The hook is only added if a hook of the same class is not already present.
Args:
hook (pySDC.Hook): A hook class that is derived from the core hook class
Returns:
None
"""
if hook not in [type(me) for me in self.hooks]:
self.__hooks += [hook()]
def welcome_message(self):
out = (
"Welcome to the one and only, really very astonishing and 87.3% bug free"
+ "\n"
+ r" _____ _____ _____ "
+ "\n"
+ r" / ____| __ \ / ____|"
+ "\n"
+ r" _ __ _ _| (___ | | | | | "
+ "\n"
+ r" | '_ \| | | |\___ \| | | | | "
+ "\n"
+ r" | |_) | |_| |____) | |__| | |____ "
+ "\n"
+ r" | .__/ \__, |_____/|_____/ \_____|"
+ "\n"
+ r" | | __/ | "
+ "\n"
+ r" |_| |___/ "
+ "\n"
+ r" "
)
self.logger.info(out)
def dump_setup(self, step, controller_params, description):
"""
Helper function to dump the setup used for this controller
Args:
step (pySDC.Step.step): the step instance (will/should be the first one only)
controller_params (dict): controller parameters
description (dict): description of the problem
"""
self.welcome_message()
out = 'Setup overview (--> user-defined, -> dependency) -- BEGIN'
self.logger.info(out)
out = '----------------------------------------------------------------------------------------------------\n\n'
out += 'Controller: %s\n' % self.__class__
for k, v in vars(self.params).items():
if not k.startswith('_'):
if k in controller_params:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
out += '\nStep: %s\n' % step.__class__
for k, v in vars(step.params).items():
if not k.startswith('_'):
if k in description['step_params']:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
out += ' Level: %s\n' % step.levels[0].__class__
for L in step.levels:
out += ' Level %2i\n' % L.level_index
for k, v in vars(L.params).items():
if not k.startswith('_'):
if k in description['level_params']:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
out += '--> Problem: %s\n' % L.prob.__class__
for k, v in L.prob.params.items():
if k in description['problem_params']:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
out += '--> Data type u: %s\n' % L.prob.dtype_u
out += '--> Data type f: %s\n' % L.prob.dtype_f
out += '--> Sweeper: %s\n' % L.sweep.__class__
for k, v in vars(L.sweep.params).items():
if not k.startswith('_'):
if k in description['sweeper_params']:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
out += '--> Collocation: %s\n' % L.sweep.coll.__class__
if len(step.levels) > 1:
if 'base_transfer_class' in description and description['base_transfer_class'] is not base_transfer:
out += '--> Base Transfer: %s\n' % step.base_transfer.__class__
else:
out += ' Base Transfer: %s\n' % step.base_transfer.__class__
for k, v in vars(step.base_transfer.params).items():
if not k.startswith('_'):
if k in description['base_transfer_params']:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
out += '--> Space Transfer: %s\n' % step.base_transfer.space_transfer.__class__
for k, v in vars(step.base_transfer.space_transfer.params).items():
if not k.startswith('_'):
if k in description['space_transfer_params']:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
out += '\n'
out += self.get_convergence_controllers_as_table(description)
out += '\n'
self.logger.info(out)
out = '----------------------------------------------------------------------------------------------------'
self.logger.info(out)
out = 'Setup overview (--> user-defined, -> dependency) -- END\n'
self.logger.info(out)
def run(self, u0, t0, Tend):
"""
Abstract interface to the run() method
Args:
u0: initial values
t0 (float): starting time
Tend (float): ending time
"""
raise NotImplementedError('ERROR: controller has to implement run(self, u0, t0, Tend)')
@property
def hooks(self):
"""
Getter for the hooks
Returns:
pySDC.Hooks.hooks: hooks
"""
return self.__hooks
def setup_convergence_controllers(self, description):
'''
Setup variables needed for convergence controllers, notably a list containing all of them and a list containing
their order. Also, we add the `CheckConvergence` convergence controller, which takes care of maximum iteration
count or a residual based stopping criterion, as well as all convergence controllers added to the description.
Args:
description (dict): The description object used to instantiate the controller
Returns:
None
'''
self.convergence_controllers = []
self.convergence_controller_order = []
conv_classes = description.get('convergence_controllers', {})
# instantiate the convergence controllers
for conv_class, params in conv_classes.items():
self.add_convergence_controller(conv_class, description=description, params=params)
return None
def add_convergence_controller(self, convergence_controller, description, params=None, allow_double=False):
'''
Add an individual convergence controller to the list of convergence controllers and instantiate it.
Afterwards, the order of the convergence controllers is updated.
Args:
convergence_controller (pySDC.ConvergenceController): The convergence controller to be added
description (dict): The description object used to instantiate the controller
params (dict): Parameters for the convergence controller
allow_double (bool): Allow adding the same convergence controller multiple times
Returns:
None
'''
# check if we passed any sort of special params
params = {**({} if params is None else params), 'useMPI': self.useMPI}
# check if we already have the convergence controller or if we want to have it multiple times
if convergence_controller not in [type(me) for me in self.convergence_controllers] or allow_double:
self.convergence_controllers.append(convergence_controller(self, params, description))
# update ordering
orders = [C.params.control_order for C in self.convergence_controllers]
self.convergence_controller_order = np.arange(len(self.convergence_controllers))[np.argsort(orders)]
return None
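    # Usage sketch (editor's note): users normally do not call this method directly; they list
    # convergence controllers in the description dictionary, e.g.
    #   description['convergence_controllers'] = {MyController: {'some_tol': 1e-7}}
    # (MyController and 'some_tol' are placeholders for a ConvergenceController subclass and its
    # parameters), and setup_convergence_controllers() above calls this method once per entry.
    # Controllers that depend on other controllers may call it directly, using allow_double if needed.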
def get_convergence_controllers_as_table(self, description):
'''
This function is for debugging purposes to keep track of the different convergence controllers and their order.
Args:
description (dict): Description of the problem
Returns:
str: Table of convergence controllers as a string
'''
out = 'Active convergence controllers:'
out += '\n | # | order | convergence controller'
out += '\n----+----+-------+---------------------------------------------------------------------------------------'
for i in range(len(self.convergence_controllers)):
C = self.convergence_controllers[self.convergence_controller_order[i]]
# figure out how the convergence controller was added
if type(C) in description.get('convergence_controllers', {}).keys(): # added by user
user_added = '--> '
elif type(C) in self.base_convergence_controllers: # added by default
user_added = ' '
else: # added as dependency
user_added = ' -> '
out += f'\n{user_added}|{i:3} | {C.params.control_order:5} | {type(C).__name__}'
return out
def return_stats(self):
"""
Return the merged stats from all hooks
Returns:
dict: Merged stats from all hooks
"""
stats = {}
for hook in self.hooks:
stats = {**stats, **hook.return_stats()}
return stats
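    # Usage sketch (editor's note): this merged dictionary is what a controller run typically hands
    # back as `stats`; it is then post-processed with the helpers in pySDC.helpers.stats_helper, e.g.
    #   niters = get_sorted(stats, type='niter', sortby='process')
    # as done in the project scripts further down in this collection.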
| 13,707 | 39.081871 | 124 | py |
pySDC | pySDC-master/pySDC/core/SpaceTransfer.py | import logging
from pySDC.helpers.pysdc_helper import FrozenClass
# short helper class to add params as attributes
class _Pars(FrozenClass):
def __init__(self, pars):
self.periodic = False
self.equidist_nested = True
self.iorder = 2
self.rorder = 2
for k, v in pars.items():
setattr(self, k, v)
# freeze class, no further attributes allowed from this point
self._freeze()
class space_transfer(object):
"""
Abstract space_transfer class
Attributes:
params (__Pars): parameters given by the user
logger: custom logger for transfer-related logging
fine_prob (pySDC.Problem.ptype): reference to the fine problem
coarse_prob (pySDC.Problem.ptype): reference to the coarse problem
"""
def __init__(self, fine_prob, coarse_prob, space_transfer_params):
"""
Initialization routine
Args:
fine_prob (pySDC.Problem.ptype): reference to the fine problem
coarse_prob (pySDC.Problem.ptype): reference to the coarse problem
space_transfer_params (dict): user-defined parameters
"""
self.params = _Pars(space_transfer_params)
# set up logger
self.logger = logging.getLogger('space-transfer')
# just copy by object
self.fine_prob = fine_prob
self.coarse_prob = coarse_prob
def restrict(self, F):
"""
Abstract interface for restriction in space
Args:
F: the fine level data (easier to access than via the fine attribute)
"""
raise NotImplementedError('ERROR: space_transfer has to implement restrict(self, F)')
def prolong(self, G):
"""
Abstract interface for prolongation in space
Args:
G: the coarse level data (easier to access than via the coarse attribute)
"""
raise NotImplementedError('ERROR: space_transfer has to implement prolong(self, G)')
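# Implementation sketch (editor's illustration, not part of the library): a subclass only has to
# provide the two abstract methods above. For identical fine and coarse discretizations a trivial
# operator could simply copy the data, e.g.
#   class trivial_transfer(space_transfer):
#       def restrict(self, F):
#           return type(F)(F)
#       def prolong(self, G):
#           return type(G)(G)
# Realistic transfer classes assemble interpolation/restriction operators whose accuracy is
# steered by params.iorder and params.rorder defined above.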
| 2,000 | 29.318182 | 93 | py |
pySDC | pySDC-master/pySDC/core/Step.py | import logging
from pySDC.core import Level as levclass
from pySDC.core.BaseTransfer import base_transfer
from pySDC.core.Errors import ParameterError
from pySDC.helpers.pysdc_helper import FrozenClass
# short helper class to add params as attributes
class _Pars(FrozenClass):
def __init__(self, params):
self.maxiter = None
for k, v in params.items():
setattr(self, k, v)
# freeze class, no further attributes allowed from this point
self._freeze()
# short helper class to bundle all status variables
class _Status(FrozenClass):
"""
This class carries the status of the step. All variables that the core SDC / PFASST functionality depend on are
initialized here, while the convergence controllers are allowed to add more variables in a controlled fashion
later on using the `add_variable` function.
"""
def __init__(self):
self.iter = None
self.stage = None
self.slot = None
self.first = None
self.last = None
self.pred_cnt = None
self.done = None
self.force_done = None
self.force_continue = False
self.prev_done = None
self.time_size = None
self.diff_old_loc = None
self.diff_first_loc = None
# freeze class, no further attributes allowed from this point
self._freeze()
class step(FrozenClass):
"""
Step class, referencing most of the structure needed for the time-stepping
This class bundles multiple levels and the corresponding transfer operators and is used by the controller
(e.g. SDC and MLSDC). Status variables like the current time are hidden via properties and setters methods.
Attributes:
params (__Pars): parameters given by the user
status (__Status): status class for the step
logger: custom logger for step-related logging
levels (list): list of levels
"""
def __init__(self, description):
"""
Initialization routine
Args:
description (dict): parameters given by the user, will be added as attributes
"""
# set params and status
self.params = _Pars(description.get('step_params', {}))
self.status = _Status()
# set up logger
self.logger = logging.getLogger('step')
# empty attributes
self.__transfer_dict = {}
self.base_transfer = None
self.levels = []
self.__prev = None
self.__next = None
# freeze class, no further attributes allowed from this point
self._freeze()
# create hierarchy of levels
self.__generate_hierarchy(description)
def __generate_hierarchy(self, descr):
"""
Routine to generate the level hierarchy for a single step
This makes the explicit generation of levels in the frontend obsolete and hides a few dirty hacks here and
there.
Args:
descr (dict): dictionary containing the description of the levels as list per key
"""
if 'dtype_u' in descr:
raise ParameterError(
'Deprecated parameter dtype_u, please remove from description dictionary and specify '
'directly in the problem class'
)
if 'dtype_f' in descr:
raise ParameterError(
'Deprecated parameter dtype_f, please remove from description dictionary and specify '
'directly in the problem class'
)
        # assert the existence of all the keys we need to set up at least one level
essential_keys = ['problem_class', 'sweeper_class', 'sweeper_params', 'level_params']
for key in essential_keys:
if key not in descr:
msg = 'need %s to instantiate step, only got %s' % (key, str(descr.keys()))
self.logger.error(msg)
raise ParameterError(msg)
descr['problem_params'] = descr.get('problem_params', {})
# check if base_transfer class is specified
descr['base_transfer_class'] = descr.get('base_transfer_class', base_transfer)
# check if base_transfer parameters are needed
descr['base_transfer_params'] = descr.get('base_transfer_params', {})
# check if space_transfer class is specified
descr['space_transfer_class'] = descr.get('space_transfer_class', {})
# check if space_transfer parameters are needed
descr['space_transfer_params'] = descr.get('space_transfer_params', {})
# convert problem-dependent parameters consisting of dictionary of lists to a list of dictionaries with only a
# single entry per key, one dict per level
pparams_list = self.__dict_to_list(descr['problem_params'])
lparams_list = self.__dict_to_list(descr['level_params'])
swparams_list = self.__dict_to_list(descr['sweeper_params'])
# put this newly generated list into the description dictionary (copy to avoid changing the original one)
descr_new = descr.copy()
descr_new['problem_params'] = pparams_list
descr_new['level_params'] = lparams_list
descr_new['sweeper_params'] = swparams_list
# generate list of dictionaries out of the description
descr_list = self.__dict_to_list(descr_new)
        # sanity check: is there a space_transfer class if multiple levels are requested? And are transfer classes given although only a single level is specified?
if len(descr_list) > 1 and not descr_new['space_transfer_class']:
msg = 'need %s to instantiate step, only got %s' % ('space_transfer_class', str(descr_new.keys()))
self.logger.error(msg)
raise ParameterError(msg)
if len(descr_list) == 1 and (
descr_new['space_transfer_class'] or descr_new['base_transfer_class'] is not base_transfer
):
self.logger.warning('you have specified transfer classes, but only a single level')
# generate levels, register and connect if needed
for l in range(len(descr_list)):
L = levclass.level(
problem_class=descr_list[l]['problem_class'],
problem_params=descr_list[l]['problem_params'],
sweeper_class=descr_list[l]['sweeper_class'],
sweeper_params=descr_list[l]['sweeper_params'],
level_params=descr_list[l]['level_params'],
level_index=l,
)
self.levels.append(L)
if l > 0:
self.connect_levels(
base_transfer_class=descr_new['base_transfer_class'],
base_transfer_params=descr_list[l]['base_transfer_params'],
space_transfer_class=descr_list[l]['space_transfer_class'],
space_transfer_params=descr_list[l]['space_transfer_params'],
fine_level=self.levels[l - 1],
coarse_level=self.levels[l],
)
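    # Description sketch (editor's note, cf. the project scripts in this collection): a minimal
    # multi-level setup passes something like
    #   description = {
    #       'problem_class': ..., 'problem_params': {...},
    #       'sweeper_class': ..., 'sweeper_params': {'num_nodes': [3]},
    #       'level_params': {'dt': ..., 'restol': ...}, 'step_params': {'maxiter': ...},
    #       'space_transfer_class': ..., 'space_transfer_params': {...},
    #   }
    # where list-valued entries (e.g. num_nodes or nvars) provide one value per level via
    # __dict_to_list below.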
@staticmethod
def __dict_to_list(in_dict):
"""
Straightforward helper function to convert dictionary of list to list of dictionaries
Args:
in_dict (dict): dictionary of lists
Returns:
list of dictionaries
"""
max_val = 1
for _, v in in_dict.items():
if type(v) is list:
max_val = max(max_val, len(v))
else:
pass
ld = [{} for _ in range(max_val)]
for d in range(len(ld)):
for k, v in in_dict.items():
if type(v) is not list:
ld[d][k] = v
else:
ld[d][k] = v[min(d, len(v) - 1)]
return ld
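    # Example (editor's note): __dict_to_list({'nvars': [255, 127], 'nu': 0.5}) yields
    #   [{'nvars': 255, 'nu': 0.5}, {'nvars': 127, 'nu': 0.5}]
    # i.e. scalars are copied to every level and shorter lists repeat their last entry.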
def connect_levels(
self,
base_transfer_class,
base_transfer_params,
space_transfer_class,
space_transfer_params,
fine_level,
coarse_level,
):
"""
Routine to couple levels with base_transfer operators
Args:
base_transfer_class: the class which can do transfer between the two space-time levels
base_transfer_params (dict): parameters for the space_transfer class
space_transfer_class: the user-defined class which can do spatial transfer
space_transfer_params (dict): parameters for the base_transfer class
fine_level (pySDC.Level.level): the fine level
coarse_level (pySDC.Level.level): the coarse level
"""
# create new instance of the specific base_transfer class
self.base_transfer = base_transfer_class(
fine_level, coarse_level, base_transfer_params, space_transfer_class, space_transfer_params
)
# use base_transfer dictionary twice to set restrict and prolong operator
self.__transfer_dict[(fine_level, coarse_level)] = self.base_transfer.restrict
if self.base_transfer.params.finter:
self.__transfer_dict[(coarse_level, fine_level)] = self.base_transfer.prolong_f
else:
self.__transfer_dict[(coarse_level, fine_level)] = self.base_transfer.prolong
def transfer(self, source, target):
"""
Wrapper routine to ease the call of the transfer functions
This function can be called in the multilevel stepper (e.g. MLSDC), passing a source and a target level.
        Using the transfer dictionary, the calling stepper does not need to specify whether to use restrict or
prolong.
Args:
source (pySDC.Level.level): source level
target (pySDC.Level.level): target level
"""
self.__transfer_dict[(source, target)]()
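    # Usage sketch (editor's note): controllers and the recovery strategies in this project move
    # data through the hierarchy via
    #   S.transfer(source=S.levels[l - 1], target=S.levels[l])    # restrict to the coarser level
    #   S.transfer(source=S.levels[l], target=S.levels[l - 1])    # prolong / coarse correction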
def reset_step(self):
"""
        Routine to clean up the step structure and the corresponding levels for further use
"""
# reset all levels
for l in self.levels:
l.reset_level()
def init_step(self, u0):
"""
Initialization routine for a new step.
This routine uses initial values u0 to set up the u[0] values at the finest level
Args:
u0 (dtype_u): initial values
"""
assert len(self.levels) >= 1
assert len(self.levels[0].u) >= 1
# pass u0 to u[0] on the finest level 0
P = self.levels[0].prob
self.levels[0].u[0] = P.dtype_u(u0)
@property
def prev(self):
"""
Getter for previous step
Returns:
prev
"""
return self.__prev
@prev.setter
def prev(self, p):
"""
Setter for previous step
Args:
p: new previous step
"""
self.__prev = p
@property
def next(self):
"""
Getter for next step
Returns:
            next
"""
return self.__next
@next.setter
def next(self, p):
"""
Setter for next step
Args:
p: new next step
"""
self.__next = p
@property
def dt(self):
"""
Getter for current time-step size
Returns:
float: dt of level[0]
"""
return self.levels[0].dt
@property
def time(self):
"""
Getter for current time
Returns:
float: time of level[0]
"""
return self.levels[0].time
| 11,352 | 33.195783 | 118 | py |
pySDC | pySDC-master/pySDC/core/Problem.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description
-----------
Module containing the base Problem class for pySDC
"""
import logging
from pySDC.core.Common import RegisterParams
class WorkCounter(object):
"""
Utility class for counting iterations.
Contains one attribute `niter` initialized to zero during
instantiation, which can be incremented by calling object as
a function, e.g
>>> count = WorkCounter() # => niter = 0
>>> count() # => niter = 1
>>> count() # => niter = 2
"""
def __init__(self):
self.niter = 0
def __call__(self, *args, **kwargs):
# *args and **kwargs are necessary for gmres
self.niter += 1
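# Usage sketch (editor's note): concrete problems register counters in ptype.work_counters below,
# e.g. self.work_counters['newton'] = WorkCounter() ('newton' is just a placeholder key) and call
# them once per iteration; since __call__ ignores its arguments, a counter can also be passed
# directly as the iteration callback of an iterative linear solver such as scipy's gmres.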
class ptype(RegisterParams):
"""
Prototype class for problems, just defines the attributes essential to get started.
Parameters
----------
init : list of args
Argument(s) used to initialize data types.
dtype_u : type
Variable data type. Should generate a data variable using dtype_u(init).
dtype_f : type
RHS data type. Should generate a data variable using dtype_f(init).
Attributes
----------
logger: logging.Logger
custom logger for problem-related logging.
"""
logger = logging.getLogger('problem')
dtype_u = None
dtype_f = None
def __init__(self, init):
self.work_counters = {} # Dictionary to store WorkCounter objects
self.init = init # Initialization parameter to instantiate data types
@property
def u_init(self):
"""Generate a data variable for u"""
return self.dtype_u(self.init)
@property
def f_init(self):
"""Generate a data variable for RHS"""
return self.dtype_f(self.init)
def eval_f(self, u, t):
"""
Abstract interface to RHS computation of the ODE
Parameters
----------
u : dtype_u
Current values.
t : float
Current time.
Returns
-------
f : dtype_f
The RHS values.
"""
raise NotImplementedError('ERROR: problem has to implement eval_f(self, u, t)')
def apply_mass_matrix(self, u): # pragma: no cover
"""Default mass matrix : identity"""
return u
def generate_scipy_reference_solution(self, eval_rhs, t, u_init=None, t_init=None, **kwargs):
"""
Compute a reference solution using `scipy.solve_ivp` with very small tolerances.
Keep in mind that scipy needs the solution to be a one dimensional array. If you are solving something higher
dimensional, you need to make sure the function `eval_rhs` takes a flattened one-dimensional version as an input
and output, but reshapes to whatever the problem needs for evaluation.
The keyword arguments will be passed to `scipy.solve_ivp`. You should consider passing `method='BDF'` for stiff
problems and to accelerate that you can pass a function that evaluates the Jacobian with arguments `jac(t, u)`
as `jac=jac`.
Args:
eval_rhs (function): Function evaluate the full right hand side. Must have signature `eval_rhs(float: t, numpy.1darray: u)`
t (float): current time
u_init (pySDC.implementations.problem_classes.Lorenz.dtype_u): initial conditions for getting the exact solution
t_init (float): the starting time
Returns:
numpy.ndarray: Reference solution
"""
import numpy as np
from scipy.integrate import solve_ivp
tol = 100 * np.finfo(float).eps
u_init = self.u_exact(t=0) if u_init is None else u_init * 1.0
t_init = 0 if t_init is None else t_init
u_shape = u_init.shape
return (
solve_ivp(eval_rhs, (t_init, t), u_init.flatten(), rtol=tol, atol=tol, **kwargs).y[:, -1].reshape(u_shape)
)
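    # Usage sketch (editor's illustration; `prob` stands for a concrete subclass whose right-hand
    # side can be evaluated on flattened arrays, as discussed in the docstring above):
    #   u_ref = prob.generate_scipy_reference_solution(lambda t, u: prob.eval_f(u, t), t=1.0, method='BDF')
    # The very tight rtol/atol (100 * machine epsilon) make the result usable as a reference
    # solution in convergence tests.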
| 3,945 | 30.31746 | 135 | py |
pySDC | pySDC-master/pySDC/core/Errors.py | class DataError(Exception):
"""
Error Class handling/indicating problems with data types
"""
pass
class ParameterError(Exception):
"""
Error Class handling/indicating problems with parameters (mostly within dictionaries)
"""
pass
class UnlockError(Exception):
"""
Error class handling/indicating unlocked levels
"""
pass
class CollocationError(Exception):
"""
Error class handling/indicating problems with the collocation
"""
pass
class ConvergenceError(Exception):
"""
Error class handling/indicating problems with convergence
"""
pass
class TransferError(Exception):
"""
Error class handling/indicating problems with the transfer processes
"""
pass
class CommunicationError(Exception):
"""
Error class handling/indicating problems with the communication
"""
pass
class ControllerError(Exception):
"""
Error class handling/indicating problems with the controller
"""
pass
class ProblemError(Exception):
"""
Error class handling/indicating problems with the problem classes
"""
pass
class ReadOnlyError(Exception): # pragma: no cover
"""
Exception thrown when setting a read-only class attribute
"""
def __init__(self, name):
super().__init__(f'cannot set read-only attribute {name}')
| 1,382 | 16.2875 | 89 | py |
pySDC | pySDC-master/pySDC/projects/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/deprecated/node_failure/emulate_hard_faults.py | import copy as cp
import random as rd
import numpy as np
# dirty, but easiest: global variables to control the injection and recovery
hard_iter = None
hard_step = None
strategy = None
hard_random = 0.0
hard_stats = []
refdata = None
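# Usage sketch (editor's note, mirroring the run scripts in this project): drivers set these
# module-level switches before starting the controller, e.g.
#   import pySDC.projects.deprecated.node_failure.emulate_hard_faults as ft
#   ft.strategy = 'INTERP_PREDICT'       # recovery strategy
#   ft.hard_step, ft.hard_iter = 7, 7    # deterministic single fault at this step/iteration
#   ft.hard_random = 0.03                # or: random faults with 3% probability per check
# and read ft.hard_stats afterwards to see where faults were injected.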
def hard_fault_injection(S):
"""
Injects a node failure and recovers using a defined strategy, can be called in the controller
Args:
S: the current step data structure
Returns:
the step after a node failure
"""
# name global variables for this routine
global hard_iter, hard_step, strategy, hard_stats, hard_random, refdata
# set the seed in the first iteration, using the process number for reproducibility
if S.status.iter == 1:
rd.seed(S.status.slot)
# draw random number and check if we are below our threshold (hard_random gives percentage)
if strategy == 'NOFAULT':
doit = rd.random() < hard_random
if doit:
hard_stats.append((S.status.slot, S.status.iter, S.time))
else:
if refdata is not None:
# noinspection PyTypeChecker
doit = np.any(np.all([S.status.slot, S.status.iter, S.time] == refdata, axis=1))
else:
doit = False
# if we set step and iter, inject and recover (if faults are supposed to occur)
if ((hard_step == S.status.slot and hard_iter == S.status.iter) or doit) and strategy != 'NOFAULT':
print('things went wrong here: step %i -- iteration %i -- time %e' % (S.status.slot, S.status.iter, S.time))
# add incident to statistics data type
hard_stats.append((S.status.slot, S.status.iter, S.time))
# ok, that's a little bit of cheating... we would need to retrieve the current residual and iteration count
# from the previous process, but this does not matter here
res = cp.deepcopy(S.levels[-1].status.residual)
niter = cp.deepcopy(S.status.iter) - 1
time = cp.deepcopy(S.time)
# fault injection, set everything to zero or null or whatever
S.reset_step()
for lvl in S.levels:
lvl.status.time = time
# recovery
if strategy == 'SPREAD':
S = hard_fault_correction_spread(S)
elif strategy == 'INTERP':
S = hard_fault_correction_interp(S)
elif strategy == 'INTERP_PREDICT':
S = hard_fault_correction_interp_predict(S, res, niter)
elif strategy == 'SPREAD_PREDICT':
S = hard_fault_correction_spread_predict(S, res, niter)
else:
raise NotImplementedError('recovery strategy not implemented')
return S
# Here come the recovery strategies
def hard_fault_correction_spread(S):
"""
do nothing, just get new initial conditions and do sweep predict (copy)
strategy '1-sided'
Args:
S: the current step (no data available)
Returns:
S: recovered step
"""
# get new initial data, either from previous processes or "from scratch"
if not S.status.first:
ufirst = S.prev.levels[0].prob.dtype_u(S.prev.levels[0].uend)
else:
ufirst = S.levels[0].prob.u_exact(S.time)
L = S.levels[0]
# set data
L.u[0] = L.prob.dtype_u(ufirst)
# call prediction of the sweeper (copy the values to all nodes)
L.sweep.predict()
# compute uend
L.sweep.compute_end_point()
# proceed with fine sweep
S.status.stage = 'IT_FINE_SWEEP'
return S
def hard_fault_correction_interp(S):
"""
get new initial conditions from left and uend from right, then interpolate
strategy '2-sided'
Args:
S: the current step (no data available)
Returns:
S: recovered step
"""
# get new initial data, either from previous processes or "from scratch"
if not S.status.first:
ufirst = S.prev.levels[0].prob.dtype_u(S.prev.levels[0].uend)
else:
ufirst = S.levels[0].prob.u_exact(S.time)
# if I'm not the last one, get uend from following process
# otherwise set uend = u0, so that interpolation is a copy
if not S.status.last:
ulast = S.next.levels[0].prob.dtype_u(S.next.levels[0].u[0])
else:
ulast = ufirst
L = S.levels[0]
# set u0, and interpolate the rest
# evaluate f for each node (fixme: we could try to interpolate here as well)
L.u[0] = L.prob.dtype_u(ufirst)
L.f[0] = L.prob.eval_f(L.u[0], L.time)
for m in range(1, L.sweep.coll.num_nodes + 1):
L.u[m] = (1 - L.sweep.coll.nodes[m - 1]) * ufirst + L.sweep.coll.nodes[m - 1] * ulast
L.f[m] = L.prob.eval_f(L.u[m], L.time + L.dt * L.sweep.coll.nodes[m - 1])
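    # (editor's note) the loop above is plain linear interpolation in time between the new initial
    # value and the neighbor's end value, u_m = (1 - tau_m) * u_first + tau_m * u_last with
    # collocation nodes tau_m in (0, 1] -- hence the name '2-sided' recovery.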
# set fine level to active
L.status.unlocked = True
# compute uend
L.sweep.compute_end_point()
# proceed with fine sweep
S.status.stage = 'IT_FINE_SWEEP'
return S
def hard_fault_correction_spread_predict(S, res, niter):
"""
get new initial conditions from left, copy data to nodes and correct on coarse level
strategy '1-sided+corr'
Args:
S: the current step (no data available)
res: the target residual
niter: the max. number of iteration
Returns:
S: recovered step
"""
# get new initial data, either from previous processes or "from scratch"
if not S.status.first:
ufirst = S.prev.levels[0].prob.dtype_u(S.prev.levels[0].uend)
else:
ufirst = S.levels[0].prob.u_exact(S.time)
L = S.levels[0]
# set u0, and copy
L.u[0] = L.prob.dtype_u(ufirst)
L.sweep.predict()
# transfer to the coarsest level (overwrite values)
for l in range(1, len(S.levels)):
S.transfer(source=S.levels[l - 1], target=S.levels[l])
# compute preliminary residual (just to set it)
S.levels[-1].status.updated = True
S.levels[-1].sweep.compute_residual()
    # keep sweeping until either k reaches niter or the current residual is lower than res (niter and res were
    # stored before the fault injection; lazy, we should get this from the previous process)
k = 0
if res is not None:
while S.levels[-1].status.residual > res and k < niter:
k += 1
print(S.levels[-1].status.residual, res, k)
S.levels[-1].sweep.update_nodes()
S.levels[-1].sweep.compute_residual()
# transfer back to finest level (coarse correction!)
for l in range(len(S.levels) - 1, 0, -1):
S.transfer(source=S.levels[l], target=S.levels[l - 1])
# compute uend
L.sweep.compute_end_point()
# proceed with fine sweep
S.status.stage = 'IT_FINE_SWEEP'
return S
def hard_fault_correction_interp_predict(S, res, niter):
"""
get new initial conditions from left and uend from right, interpolate data to nodes and correct on coarse level
strategy '2-sided+corr'
Args:
S: the current step (no data available)
res: the target residual
niter: the max. number of iteration
Returns:
S: recovered step
"""
# get new initial data, either from previous processes or "from scratch"
if not S.status.first:
ufirst = S.prev.levels[0].prob.dtype_u(S.prev.levels[0].uend)
else:
ufirst = S.levels[0].prob.u_exact(S.time)
# if I'm not the last one, get uend from following process
# otherwise set uend = u0, so that interpolation is a copy
if not S.status.last:
ulast = S.next.levels[0].prob.dtype_u(S.next.levels[0].u[0])
else:
ulast = ufirst
L = S.levels[0]
# set u0, and interpolate the rest
# evaluate f for each node (fixme: we could try to interpolate here as well)
L.u[0] = L.prob.dtype_u(ufirst)
L.f[0] = L.prob.eval_f(L.u[0], L.time)
for m in range(1, L.sweep.coll.num_nodes + 1):
L.u[m] = (1 - L.sweep.coll.nodes[m - 1]) * ufirst + L.sweep.coll.nodes[m - 1] * ulast
L.f[m] = L.prob.eval_f(L.u[m], L.time + L.dt * L.sweep.coll.nodes[m - 1])
# set fine level to active
L.status.unlocked = True
# transfer to the coarsest level (overwrite values)
for l in range(1, len(S.levels)):
S.transfer(source=S.levels[l - 1], target=S.levels[l])
# compute preliminary residual (just to set it)
S.levels[-1].status.updated = True
S.levels[-1].sweep.compute_residual()
    # keep sweeping until either k reaches niter or the current residual is lower than res (niter and res were
    # stored before the fault injection; lazy, we should get this from the previous process)
k = 0
if res is not None:
while S.levels[-1].status.residual > res and k < niter:
k += 1
print(S.levels[-1].status.residual, res, k)
S.levels[-1].sweep.update_nodes()
S.levels[-1].sweep.compute_residual()
# transfer back to finest level (coarse correction!)
for l in range(len(S.levels) - 1, 0, -1):
S.transfer(source=S.levels[l], target=S.levels[l - 1])
# compute uend
L.sweep.compute_end_point()
# proceed with fine sweep
S.status.stage = 'IT_FINE_SWEEP'
return S
| 9,040 | 31.174377 | 116 | py |
pySDC | pySDC-master/pySDC/projects/deprecated/node_failure/postproc_hard_faults_test.py | import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
# import os
def create_plots(setup, cwd=''):
"""
Function to create heatmaps for faults at different steps and iterations
Args:
setup (str): name of the setup (heat or advection)
cwd: current working directory
"""
axis_font = {'fontname': 'Arial', 'size': '8', 'family': 'serif'}
fs = 8
fields = [
(setup + '_results_hf_SPREAD.npz', 'SPREAD'),
(setup + '_results_hf_SPREAD_PREDICT.npz', 'SPREAD_PREDICT'),
(setup + '_results_hf_INTERP.npz', 'INTERP'),
(setup + '_results_hf_INTERP_PREDICT.npz', 'INTERP_PREDICT'),
]
vmin = 99
vmax = 0
for file, _ in fields:
infile = np.load(cwd + 'data/' + file)
data = infile['iter_count'].T
data = data - data[0, 0]
vmin = min(vmin, data.min())
vmax = max(vmax, data.max())
for file, strategy in fields:
infile = np.load(cwd + 'data/' + file)
data = infile['iter_count'].T
data = data - data[0, 0]
ft_iter = infile['ft_iter']
ft_step = infile['ft_step']
rcParams['figure.figsize'] = 3.0, 2.5
fig, ax = plt.subplots()
cmap = plt.get_cmap('Reds', vmax - vmin + 1)
pcol = plt.pcolor(data, cmap=cmap, vmin=vmin, vmax=vmax)
pcol.set_edgecolor('face')
plt.axis([ft_step[0], ft_step[-1] + 1, ft_iter[0] - 1, ft_iter[-1]])
ticks = np.arange(int(vmin) + 1, int(vmax) + 2, 2)
tickpos = np.linspace(ticks[0] + 0.5, ticks[-1] - 0.5, len(ticks))
cax = plt.colorbar(pcol, ticks=tickpos, format='%2i')
plt.tick_params(axis='both', which='major', labelsize=fs)
cax.set_ticklabels(ticks)
cax.set_label(r'$K_\mathrm{add}$', **axis_font)
cax.ax.tick_params(labelsize=fs)
ax.set_xlabel('affected step', labelpad=1, **axis_font)
ax.set_ylabel(r'affected iteration ($K_\mathrm{fault}$)', labelpad=1, **axis_font)
ax.set_xticks(np.arange(len(ft_step)) + 0.5, minor=False)
ax.set_xticklabels(ft_step, minor=False)
ax.set_yticks(np.arange(len(ft_iter)) + 0.5, minor=False)
ax.set_yticklabels(ft_iter, minor=False)
# Set every second label to invisible
for label in ax.xaxis.get_ticklabels()[::2]:
label.set_visible(False)
ax.tick_params(pad=2)
# plt.tight_layout()
# fname = setup+'_iteration_counts_hf_'+strategy+'.png'
fname = 'data/' + setup + '_iteration_counts_hf_' + strategy + '.png'
plt.savefig(fname, transparent=True, bbox_inches='tight')
# plt.savefig(fname, bbox_inches='tight')
# os.system('pdfcrop ' + fname + ' ' + fname)
plt.close('all')
if __name__ == "__main__":
create_plots(setup='HEAT')
create_plots(setup='ADVECTION')
| 2,887 | 28.773196 | 90 | py |
pySDC | pySDC-master/pySDC/projects/deprecated/node_failure/postproc_hard_faults_detail.py | import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
# import os
def create_plots(setup, cwd=''):
"""
Function to create detailed heatmaps and the iteration plot for a single fault
Args:
setup (str): name of the setup (heat or advection)
cwd: current working directory (for testing)
"""
# basic plotting setup
axis_font = {'fontname': 'Arial', 'size': '8', 'family': 'serif'}
fs = 8 # fontsize
# assemble list of setups
setup_list = [
(setup + '_steps_vs_iteration_hf_NOFAULT.npz', 'NOFAULT', 'no fault', 'k', '^'),
(setup + '_steps_vs_iteration_hf_SPREAD.npz', 'SPREAD', '1-sided', 'red', 'v'),
(setup + '_steps_vs_iteration_hf_INTERP.npz', 'INTERP', '2-sided', 'orange', 'o'),
(setup + '_steps_vs_iteration_hf_SPREAD_PREDICT.npz', 'SPREAD_PREDICT', '1-sided + corr', 'blue', 's'),
(setup + '_steps_vs_iteration_hf_INTERP_PREDICT.npz', 'INTERP_PREDICT', '2-sided + corr', 'green', 'd'),
]
maxres = -1
minres = -11
maxiter = 0
maxsteps = 0
# find axis limits
for file, _, _, _, _ in setup_list:
infile = np.load(cwd + 'data/' + file)
residual = infile['residual']
maxiter = max(maxiter, len(residual[:, 0]))
maxsteps = max(maxsteps, len(residual[0, :]))
# create heatmaps
for file, strategy, _, _, _ in setup_list:
residual = np.zeros((maxiter, maxsteps))
residual[:] = -99
infile = np.load(cwd + 'data/' + file)
input = infile['residual']
step = infile['ft_step']
iter = infile['ft_iter']
residual[0 : len(input[:, 0]), 0 : len(input[0, :])] = input
rcParams['figure.figsize'] = 3.0, 2.5
fig, ax = plt.subplots()
cmap = plt.get_cmap('Reds')
pcol = plt.pcolor(residual.T, cmap=cmap, vmin=minres, vmax=maxres)
pcol.set_edgecolor('face')
plt.axis([0, maxiter, 0, maxsteps])
cax = plt.colorbar(pcol)
cax.set_label('log10(residual)', **axis_font)
cax.ax.tick_params(labelsize=fs)
plt.tick_params(axis='both', which='major', labelsize=fs)
ax.set_xlabel('iteration', labelpad=1, **axis_font)
ax.set_ylabel('step', labelpad=1, **axis_font)
ax.set_xticks(np.arange(maxiter) + 0.5, minor=False)
ax.set_yticks(np.arange(maxsteps) + 0.5, minor=False)
ax.set_xticklabels(np.arange(maxiter) + 1, minor=False)
ax.set_yticklabels(np.arange(maxsteps), minor=False)
# Set every second label to invisible
for labelx in ax.xaxis.get_ticklabels()[::2]:
labelx.set_visible(False)
for labely in ax.yaxis.get_ticklabels()[::2]:
labely.set_visible(False)
ax.tick_params(pad=2)
# plt.tight_layout()
if strategy != 'NOFAULT':
plt.text(step - 1 + 0.5, iter + 0.5, 'x', horizontalalignment='center', verticalalignment='center')
plt.title(strategy.replace('_', '-'), **axis_font)
fname = 'data/' + setup + '_steps_vs_iteration_hf_' + str(step) + 'x' + str(iter) + '_' + strategy + '.png'
plt.savefig(fname, bbox_inches='tight')
# os.system('pdfcrop ' + fname + ' ' + fname)
rcParams['figure.figsize'] = 6.0, 3.0
fig, ax = plt.subplots()
maxiter = 0
lw = 2
ms = 8
# create iteration vs. residual plot
for file, _, label, color, marker in setup_list:
infile = np.load(cwd + 'data/' + file)
residual = infile['residual']
step = infile['ft_step']
iter = infile['ft_iter'] - 1
yvals = residual[residual[:, step] > -99, step]
maxiter = max(maxiter, len(yvals))
xvals = range(1, len(yvals) + 1)
plt.plot(
xvals[0:iter],
yvals[0:iter],
color=color,
linewidth=lw,
linestyle='-',
markersize=ms,
marker=marker,
markeredgecolor='k',
markerfacecolor=color,
label=label,
)
plt.plot(
xvals[iter : len(yvals)],
yvals[iter:],
color=color,
linewidth=lw,
linestyle='-',
markersize=ms,
marker=marker,
markeredgecolor='k',
markerfacecolor=color,
)
xvals = range(1, maxiter + 1)
plt.plot(xvals, [-9 for _ in range(maxiter)], 'k--')
plt.annotate('tolerance', xy=(1, -9.4), fontsize=fs)
left = 6.15
bottom = -12
width = 0.7
height = 12
right = left + width
top = bottom + height
rect = plt.Rectangle(xy=(left, bottom), width=width, height=height, color='lightgrey')
plt.text(
0.5 * (left + right),
0.5 * (bottom + top),
'node failure',
horizontalalignment='center',
verticalalignment='center',
rotation=90,
color='k',
fontsize=fs,
)
fig.gca().add_artist(rect)
plt.xlim(1 - 0.25, maxiter + 0.25)
plt.ylim(minres - 0.25, maxres + 0.25)
plt.xlabel('iteration', **axis_font)
plt.ylabel('log10(residual)', **axis_font)
plt.title('ALL', **axis_font)
ax.xaxis.labelpad = 0
ax.yaxis.labelpad = 0
plt.tick_params(axis='both', which='major', labelsize=fs)
plt.legend(numpoints=1, fontsize=fs)
plt.xticks(range(1, maxiter + 1))
plt.yticks(range(minres, maxres + 1))
ax.tick_params(pad=2)
# plt.tight_layout()
fname = 'data/' + setup + '_residuals_allstrategies.png'
plt.savefig(fname, bbox_inches='tight')
# os.system('pdfcrop ' + fname + ' ' + fname)
plt.close('all')
if __name__ == "__main__":
create_plots(setup='HEAT')
create_plots(setup='ADVECTION')
| 5,794 | 29.182292 | 115 | py |
pySDC | pySDC-master/pySDC/projects/deprecated/node_failure/animate_convergence.py | import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from matplotlib import rc
def create_animation(cwd=''):
"""
Function to create an animated convergence plot
Args:
cwd: current working directory
"""
rc('font', family='sans-serif', size=30)
rc('legend', fontsize='small')
rc('xtick', labelsize='small')
rc('ytick', labelsize='small')
nprocs = 32
xtick_dist = 16
minstep = 288
maxstep = 384
maxiter_full = 14
maxiter = 0
nsteps = 0
# ref = 'SDC_GRAYSCOTT_stats_hf_NOFAULT_new.npz'
# ref = 'PFASST_GRAYSCOTT_stats_hf_NOFAULT_P32.npz'
# ref = 'PFASST_GRAYSCOTT_stats_hf_SPREAD_P32.npz'
ref = 'PFASST_GRAYSCOTT_stats_hf_INTERP_PREDICT_P32.npz'
data = np.load(cwd + 'data/' + ref)
iter_count = data['iter_count'][minstep:maxstep]
residual = data['residual'][:, minstep:maxstep]
iter_count_blocks = []
for p in range(int((maxstep - minstep) / nprocs)):
step = p * nprocs
iter_count_blocks.append(int(max(iter_count[step : step + nprocs])))
residual = np.where(residual > 0, np.log10(residual), -99)
vmin = -9
vmax = -1 # int(np.amax(residual))
maxiter = max(maxiter, int(max(iter_count)))
maxiter_full = max(maxiter_full, maxiter)
nsteps = max(nsteps, len(iter_count))
fig, ax = plt.subplots(figsize=(20, 7))
ticks = np.arange(vmin, vmax + 1, 2)
# tickpos = np.linspace(ticks[0] + 0.5, ticks[-1] - 0.5, len(ticks))
# cax = plt.colorbar(ticks=tickpos)
# cax.set_ticklabels(ticks)
# cax.set_label('log10(residual)')
ax.set_xlabel('step')
ax.set_ylabel('iteration')
ax.set_yticks(np.arange(1, maxiter_full, 2) + 0.5, minor=False)
ax.set_xticks(np.arange(0, nsteps, xtick_dist) + 0.5, minor=False)
ax.set_yticklabels(np.arange(1, maxiter_full, 2) + 1, minor=False)
ax.set_xticklabels(np.arange(minstep, maxstep, xtick_dist), minor=False)
cmap = plt.get_cmap('Reds', vmax - vmin + 1)
residual = np.zeros((maxiter_full, maxstep - minstep))
plot = plt.pcolor(residual, cmap=cmap, vmin=vmin, vmax=vmax)
plt.text(0, 0, '', horizontalalignment='center', verticalalignment='center')
ticks = np.arange(vmin, vmax + 1, 2)
tickpos = np.linspace(ticks[0] + 0.5, ticks[-1] - 0.5, len(ticks))
cax = plt.colorbar(ticks=tickpos)
cax.set_ticklabels(ticks)
cax.set_label('log10(residual)')
fig.tight_layout()
def init():
res = np.zeros((maxiter_full, maxstep - minstep))
plot.set_array(res.ravel())
return plot
def animate(index):
csum_blocks = np.zeros(len(iter_count_blocks) + 1)
csum_blocks[1:] = np.cumsum(iter_count_blocks)
block = np.searchsorted(csum_blocks[1:], index)
step = block * nprocs + minstep
iter = index - int(csum_blocks[block])
res = np.zeros((maxiter_full, maxstep - minstep))
res[0:maxiter, 0 : step - minstep] = data['residual'][0:maxiter, minstep:step]
res[0:iter, 0 : step + nprocs - minstep] = data['residual'][0:iter, minstep : step + nprocs]
res = np.where(res > 0, np.log10(res), -99)
plot.set_array(res.ravel())
return plot
anim = animation.FuncAnimation(
fig, animate, init_func=init, frames=sum(iter_count_blocks) + 1, interval=1, blit=False, repeat=False
)
if "NOFAULT" not in ref:
stats = data['hard_stats']
for item in stats:
if item[0] in range(minstep, maxstep):
plt.text(
item[0] + 0.5 - (maxstep - nsteps),
item[1] - 1 + 0.5,
'x',
horizontalalignment='center',
verticalalignment='center',
)
# fig.subplots_adjust(left=0.01, bottom=0.01, right=1.2, top=1, wspace=None, hspace=None)
# Set up formatting for the movie files
Writer = animation.writers['ffmpeg']
# Writer = animation.writers['imagemagick']
# choose fps=1 for PFASST, fps=15 for SDC
writer = Writer(fps=1, metadata=dict(artist='Me'), bitrate=3200)
fname = 'data/anim_conv_' + ref.split('.')[0] + '.mp4'
anim.save(fname, writer=writer)
if __name__ == "__main__":
create_animation()
| 4,289 | 31.014925 | 109 | py |
pySDC | pySDC-master/pySDC/projects/deprecated/node_failure/postproc_boussinesq.py | import matplotlib
matplotlib.use('Agg')
import numpy as np
# import os
import matplotlib.pyplot as plt
from pylab import rcParams
axis_font = {'fontname': 'Arial', 'size': '8', 'family': 'serif'}
fs = 8
ms = 8
lw = 2
def create_plots(cwd=''):
"""
Function to plot the results of the fault-tolerant Boussinesq system
Args:
        cwd: current working directory
"""
ref = 'PFASST_BOUSSINESQ_stats_hf_NOFAULT_P16.npz'
# noinspection PyShadowingBuiltins
list = [
('PFASST_BOUSSINESQ_stats_hf_SPREAD_P16.npz', 'SPREAD', '1-sided', 'red', 's'),
('PFASST_BOUSSINESQ_stats_hf_INTERP_P16.npz', 'INTERP', '2-sided', 'orange', 'o'),
('PFASST_BOUSSINESQ_stats_hf_SPREAD_PREDICT_P16.npz', 'SPREAD_PREDICT', '1-sided+corr', 'blue', '^'),
('PFASST_BOUSSINESQ_stats_hf_INTERP_PREDICT_P16.npz', 'INTERP_PREDICT', '2-sided+corr', 'green', 'd'),
('PFASST_BOUSSINESQ_stats_hf_NOFAULT_P16.npz', 'NOFAULT', 'no fault', 'black', 'v'),
]
nprocs = 16
xtick_dist = 8
minstep = 128
maxstep = 176
# minstep = 0
# maxstep = 320
nblocks = int(320 / nprocs)
# maxiter = 14
nsteps = 0
maxiter = 0
vmax = -99
vmin = -8
for file, _, _, _, _ in list:
data = np.load(cwd + 'data/' + file)
iter_count = data['iter_count'][minstep:maxstep]
residual = data['residual'][:, minstep:maxstep]
residual[residual <= 0] = 1e-99
residual = np.log10(residual)
vmax = max(vmax, int(np.amax(residual)))
maxiter = max(maxiter, int(max(iter_count)))
nsteps = max(nsteps, len(iter_count))
print(vmin, vmax)
data = np.load(cwd + 'data/' + ref)
ref_iter_count = data['iter_count'][nprocs - 1 :: nprocs]
rcParams['figure.figsize'] = 6.0, 2.5
fig, ax = plt.subplots()
plt.plot(range(nblocks), [0] * nblocks, 'k-', linewidth=2)
ymin = 99
ymax = 0
for file, _, label, color, marker in list:
if file is not ref:
data = np.load(cwd + 'data/' + file)
iter_count = data['iter_count'][nprocs - 1 :: nprocs]
ymin = min(ymin, min(iter_count - ref_iter_count))
ymax = max(ymax, max(iter_count - ref_iter_count))
plt.plot(
range(nblocks),
iter_count - ref_iter_count,
color=color,
label=label,
marker=marker,
linestyle='',
linewidth=lw,
markersize=ms,
)
plt.xlabel('block', **axis_font)
plt.ylabel('$K_\\mathrm{add}$', **axis_font)
plt.title('ALL', **axis_font)
plt.xlim(-1, nblocks)
plt.ylim(-1 + ymin, ymax + 1)
plt.legend(loc=2, numpoints=1, fontsize=fs)
plt.tick_params(axis='both', which='major', labelsize=fs)
ax.xaxis.labelpad = -0.5
ax.yaxis.labelpad = -1
# plt.tight_layout()
fname = 'data/BOUSSINESQ_Kadd_vs_NOFAULT_hf.png'
plt.savefig(fname, bbox_inches='tight')
# os.system('pdfcrop ' + fname + ' ' + fname)
for file, strategy, _, _, _ in list:
data = np.load(cwd + 'data/' + file)
residual = data['residual'][:, minstep:maxstep]
stats = data['hard_stats']
residual[residual <= 0] = 1e-99
residual = np.log10(residual)
rcParams['figure.figsize'] = 6.0, 2.5
fig, ax = plt.subplots()
cmap = plt.get_cmap('Reds', vmax - vmin + 1)
pcol = plt.pcolor(residual, cmap=cmap, vmin=vmin, vmax=vmax)
pcol.set_edgecolor('face')
if file is not ref:
for item in stats:
if item[0] in range(minstep, maxstep):
plt.text(
item[0] + 0.5 - (maxstep - nsteps),
item[1] - 1 + 0.5,
'x',
horizontalalignment='center',
verticalalignment='center',
)
plt.axis([0, nsteps, 0, maxiter])
ticks = np.arange(vmin, vmax + 1)
tickpos = np.linspace(ticks[0] + 0.5, ticks[-1] - 0.5, len(ticks))
cax = plt.colorbar(pcol, ticks=tickpos, pad=0.02)
cax.set_ticklabels(ticks)
cax.ax.tick_params(labelsize=fs)
cax.set_label('log10(residual)', **axis_font)
plt.tick_params(axis='both', which='major', labelsize=fs)
ax.xaxis.labelpad = -0.5
ax.yaxis.labelpad = -0.5
ax.set_xlabel('step', **axis_font)
ax.set_ylabel('iteration', **axis_font)
ax.set_yticks(np.arange(1, maxiter, 2) + 0.5, minor=False)
ax.set_xticks(np.arange(0, nsteps, xtick_dist) + 0.5, minor=False)
ax.set_yticklabels(np.arange(1, maxiter, 2) + 1, minor=False)
ax.set_xticklabels(np.arange(minstep, maxstep, xtick_dist), minor=False)
plt.title(strategy.replace('_', '-'))
# plt.tight_layout()
fname = 'data/BOUSSINESQ_steps_vs_iteration_hf_' + strategy + '.png'
plt.savefig(fname, bbox_inches='tight')
# os.system('pdfcrop ' + fname + ' ' + fname)
plt.close('all')
if __name__ == "__main__":
create_plots()
| 5,161 | 29.187135 | 110 | py |
pySDC | pySDC-master/pySDC/projects/deprecated/node_failure/hard_faults_detail.py | import numpy as np
import pySDC.projects.deprecated.node_failure.emulate_hard_faults as ft
from pySDC.helpers.stats_helper import get_sorted, filter_stats
from pySDC.implementations.problem_classes.AdvectionEquation_ND_FD import advectionNd
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_forced
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
from pySDC.projects.deprecated.node_failure.controller_nonMPI_hard_faults import controller_nonMPI_hard_faults
# noinspection PyShadowingNames,PyShadowingBuiltins
def main(ft_setups, ft_strategies):
"""
    This routine generates the heatmaps showing the residual for a node failure at step n and iteration k
"""
num_procs = 16
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-09
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['finter'] = True
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 6
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# choose the iteration/step where the fault should happen
ft_step = 7
ft_iter = 7
for setup in ft_setups:
if setup == 'HEAT':
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = 0.5
problem_params['freq'] = 1
problem_params['nvars'] = [255, 127]
problem_params['bc'] = 'dirichlet-zero' # BCs
level_params['dt'] = 0.5
space_transfer_params['periodic'] = False
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heatNd_forced # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
            description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer
# setup parameters "in time"
t0 = 0.0
Tend = 8.0
elif setup == 'ADVECTION':
# initialize problem parameters
problem_params = dict()
problem_params['c'] = 1.0
problem_params['nvars'] = [256, 128]
problem_params['freq'] = 2
problem_params['order'] = 2
problem_params['bc'] = 'periodic' # boundary conditions
level_params['dt'] = 0.125
space_transfer_params['periodic'] = True
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = advectionNd # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
            description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer
# setup parameters "in time"
t0 = 0.0
Tend = 2.0
else:
raise NotImplementedError('setup not implemented')
        # loop over all strategies and check how things evolve
for strategy in ft_strategies:
print('Working on setup %s with strategy %s..' % (setup, strategy))
ft.strategy = strategy
ft.hard_step = ft_step
ft.hard_iter = ft_iter
controller = controller_nonMPI_hard_faults(
num_procs=num_procs, controller_params=controller_params, description=description
)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# stats magic: get iteration counts to find maxiter/niter
sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')
niter = max([item[1] for item in sortedlist_stats])
print('Iterations:', niter)
residual = np.zeros((niter, num_procs))
residual[:] = -99
# stats magic: extract all residuals (steps vs. iterations)
extract_stats = filter_stats(stats, type='residual_post_iteration')
for k, v in extract_stats.items():
step = k.process
iter = k.iter
if iter != -1:
residual[iter - 1, step] = np.log10(v)
print('')
np.savez(
'data/' + setup + '_steps_vs_iteration_hf_' + strategy,
residual=residual,
ft_step=ft.hard_step,
ft_iter=ft.hard_iter,
)
if __name__ == "__main__":
ft_strategies = ['NOFAULT', 'SPREAD', 'SPREAD_PREDICT', 'INTERP', 'INTERP_PREDICT']
# ft_setups = ['ADVECTION', 'HEAT']
# ft_strategies = ['NOFAULT']
ft_setups = ['HEAT']
main(ft_setups=ft_setups, ft_strategies=ft_strategies)
| 6,315 | 38.72327 | 111 | py |
pySDC | pySDC-master/pySDC/projects/deprecated/node_failure/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/deprecated/node_failure/postproc_grayscott.py | import os
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
axis_font = {'fontname': 'Arial', 'size': '8', 'family': 'serif'}
fs = 8
ms = 8
lw = 2
def create_plots(cwd=''):
"""
    Function to visualize the results of the Gray-Scott showcase
Args:
cwd: current working directory
"""
ref = 'PFASST_GRAYSCOTT_stats_hf_NOFAULT_P32.npz'
list = [
('PFASST_GRAYSCOTT_stats_hf_SPREAD_P32.npz', 'SPREAD', '1-sided', 'red', 's'),
('PFASST_GRAYSCOTT_stats_hf_INTERP_P32.npz', 'INTERP', '2-sided', 'orange', 'o'),
('PFASST_GRAYSCOTT_stats_hf_SPREAD_PREDICT_P32.npz', 'SPREAD_PREDICT', '1-sided+corr', 'blue', '^'),
('PFASST_GRAYSCOTT_stats_hf_INTERP_PREDICT_P32.npz', 'INTERP_PREDICT', '2-sided+corr', 'green', 'd'),
('PFASST_GRAYSCOTT_stats_hf_NOFAULT_P32.npz', 'NOFAULT', 'no fault', 'black', 'v'),
]
# list = [('PFASST_GRAYSCOTT_stats_hf_INTERP_P32_cN512.npz', 'INTERP', '2-sided', 'orange', 'o'),
# ('PFASST_GRAYSCOTT_stats_hf_INTERP_PREDICT_P32_cN512.npz', 'INTERP_PREDICT', '2-sided+corr', 'green', 'd'),
# ('PFASST_GRAYSCOTT_stats_hf_NOFAULT_P32.npz', 'NOFAULT', 'no fault', 'black', 'v')]
nprocs = 32
xtick_dist = 16
minstep = 288
maxstep = 384
# minstep = 0
# maxstep = 640
nblocks = int(640 / nprocs)
# maxiter = 14
nsteps = 0
maxiter = 0
vmax = -99
vmin = 99
for file, _, _, _, _ in list:
data = np.load(cwd + 'data/' + file)
iter_count = data['iter_count'][minstep:maxstep]
residual = data['residual'][:, minstep:maxstep]
residual[residual <= 0] = 1e-99
residual = np.log10(residual)
vmin = -9
vmax = max(vmax, int(np.amax(residual)))
maxiter = max(maxiter, int(max(iter_count)))
nsteps = max(nsteps, len(iter_count))
data = np.load(cwd + 'data/' + ref)
ref_iter_count = data['iter_count'][nprocs - 1 :: nprocs]
plt_helper.setup_mpl()
fig, ax = plt_helper.newfig(textwidth=238.96, scale=2.0, ratio=0.3)
ax.plot(range(nblocks), [0] * nblocks, 'k-', linewidth=2)
ymin = 99
ymax = 0
for file, _, label, color, marker in list:
if file is not ref:
data = np.load(cwd + 'data/' + file)
iter_count = data['iter_count'][nprocs - 1 :: nprocs]
ymin = min(ymin, min(iter_count - ref_iter_count))
ymax = max(ymax, max(iter_count - ref_iter_count))
ax.plot(
range(nblocks),
iter_count - ref_iter_count,
color=color,
label=label,
marker=marker,
linestyle='',
linewidth=lw,
markersize=ms,
)
ax.set_xlabel('block')
ax.set_ylabel(r'$K_\mathrm{add}$')
# ax.set_title('ALL')
ax.set_xlim(-1, nblocks)
ax.set_ylim(-1 + ymin, ymax + 1)
ax.legend(loc=2, numpoints=1, fontsize=fs)
ax.tick_params(axis='both', which='major', labelsize=fs)
# save plot, beautify
fname = 'data/GRAYSCOTT_Kadd_vs_NOFAULT_hf'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
for file, strategy, _, _, _ in list:
data = np.load(cwd + 'data/' + file)
residual = data['residual'][:, minstep:maxstep]
stats = data['hard_stats']
residual[residual <= 0] = 1e-99
residual = np.log10(residual)
fig, ax = plt_helper.newfig(textwidth=238.96, scale=2.0, ratio=0.3)
cmap = plt_helper.plt.get_cmap('Reds', vmax - vmin + 1)
pcol = ax.pcolor(residual, cmap=cmap, vmin=vmin, vmax=vmax)
pcol.set_edgecolor('face')
if file is not ref:
for item in stats:
if item[0] in range(minstep, maxstep):
ax.text(
item[0] + 0.5 - (maxstep - nsteps),
item[1] - 1 + 0.5,
'X',
horizontalalignment='center',
verticalalignment='center',
)
ax.axis([0, nsteps, 0, maxiter - 1])
ticks = np.arange(vmin, vmax + 1, 2)
tickpos = np.linspace(ticks[0] + 0.5, ticks[-1] - 0.5, len(ticks))
cax = plt_helper.plt.colorbar(pcol, ticks=tickpos, pad=0.02)
cax.set_ticklabels(ticks)
cax.ax.tick_params(labelsize=fs)
cax.set_label('log10(residual)')
ax.tick_params(axis='both', which='major', labelsize=fs)
ax.set_xlabel('step')
ax.set_ylabel('iteration')
ax.set_yticks(np.arange(1, maxiter, 2) + 0.5, minor=False)
ax.set_xticks(np.arange(0, nsteps, xtick_dist) + 0.5, minor=False)
ax.set_yticklabels(np.arange(1, maxiter, 2) + 1, minor=False)
ax.set_xticklabels(np.arange(minstep, maxstep, xtick_dist), minor=False)
# plt.title(strategy.replace('_', '-'))
# plt.tight_layout()
fname = 'data/GRAYSCOTT_steps_vs_iteration_hf_' + strategy
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
if __name__ == "__main__":
create_plots()
| 5,592 | 32.291667 | 120 | py |
pySDC | pySDC-master/pySDC/projects/deprecated/node_failure/grayscott_example.py | import numpy as np
import pySDC.projects.deprecated.node_failure.emulate_hard_faults as ft
from pySDC.helpers.stats_helper import get_sorted, filter_stats
from pySDC.implementations.problem_classes.GrayScott_1D_FEniCS_implicit import fenics_grayscott
from pySDC.implementations.sweeper_classes.generic_LU import generic_LU
from pySDC.implementations.transfer_classes.TransferFenicsMesh import mesh_to_mesh_fenics
from pySDC.projects.deprecated.node_failure.controller_nonMPI_hard_faults import controller_nonMPI_hard_faults
# noinspection PyShadowingNames,PyShadowingBuiltins
def main(ft_strategies):
"""
    This routine runs the Gray-Scott example with randomly emulated node failures and stores the residuals and iteration counts used for the heatmaps
"""
num_procs = 32
# setup parameters "in time"
t0 = 0
dt = 2.0
Tend = 1280.0
Nsteps = int((Tend - t0) / dt)
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-07
level_params['dt'] = dt
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['finter'] = True
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = 'LU'
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20
# initialize problem parameters
problem_params = dict()
# problem_params['Du'] = 1.0
# problem_params['Dv'] = 0.01
# problem_params['A'] = 0.01
# problem_params['B'] = 0.10
# splitting pulses until steady state
# problem_params['Du'] = 1.0
# problem_params['Dv'] = 0.01
# problem_params['A'] = 0.02
# problem_params['B'] = 0.079
# splitting pulses until steady state
problem_params['Du'] = 1.0
problem_params['Dv'] = 0.01
problem_params['A'] = 0.09
problem_params['B'] = 0.086
problem_params['t0'] = t0 # ugly, but necessary to set up ProblemClass
problem_params['c_nvars'] = [256]
problem_params['family'] = 'CG'
problem_params['order'] = [4]
problem_params['refinements'] = [1, 0]
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = fenics_grayscott # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_LU # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh_fenics # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer
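    # hard_random sets the probability used by the fault emulator to inject random node failures (see emulate_hard_faults)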
ft.hard_random = 0.03
controller = controller_nonMPI_hard_faults(
num_procs=num_procs, controller_params=controller_params, description=description
)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
for strategy in ft_strategies:
print('------------------------------------------ working on strategy ', strategy)
ft.strategy = strategy
        # read in reference data from clean run, will provide reproducible locations for faults
if strategy != 'NOFAULT':
reffile = np.load('data/PFASST_GRAYSCOTT_stats_hf_NOFAULT_P16.npz')
ft.refdata = reffile['hard_stats']
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# get residuals of the run
extract_stats = filter_stats(stats, type='residual_post_iteration')
        # find boundaries for x-, y- and c-axis as well as the array sizes
maxprocs = 0
maxiter = 0
minres = 0
maxres = -99
for k, v in extract_stats.items():
maxprocs = max(maxprocs, k.process)
maxiter = max(maxiter, k.iter)
minres = min(minres, np.log10(v))
maxres = max(maxres, np.log10(v))
# grep residuals and put into array
residual = np.zeros((maxiter, maxprocs + 1))
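        # -99 serves as filler for (iteration, step) entries without a logged residual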
residual[:] = -99
for k, v in extract_stats.items():
step = k.process
iter = k.iter
if iter != -1:
residual[iter - 1, step] = np.log10(v)
# stats magic: get niter (probably redundant with maxiter)
sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')
iter_count = np.zeros(Nsteps)
for item in sortedlist_stats:
iter_count[item[0]] = item[1]
print(iter_count)
np.savez(
'data/PFASST_GRAYSCOTT_stats_hf_' + ft.strategy + '_P' + str(num_procs),
residual=residual,
iter_count=iter_count,
hard_stats=ft.hard_stats,
)
if __name__ == "__main__":
# ft_strategies = ['SPREAD', 'SPREAD_PREDICT', 'INTERP', 'INTERP_PREDICT']
ft_strategies = ['NOFAULT']
main(ft_strategies=ft_strategies)
| 5,276 | 34.655405 | 112 | py |
pySDC | pySDC-master/pySDC/projects/deprecated/node_failure/controller_nonMPI_hard_faults.py | from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.convergence_controller_classes.check_convergence import CheckConvergence
from pySDC.projects.deprecated.node_failure.emulate_hard_faults import hard_fault_injection
class controller_nonMPI_hard_faults(controller_nonMPI):
"""
    PFASST controller running the serialized (non-MPI) version of PFASST in classical style; node failures are emulated right before the fine sweep
"""
def pfasst(self, S, num_procs):
"""
Main function including the stages of SDC, MLSDC and PFASST (the "controller")
For the workflow of this controller, check out one of our PFASST talks
Args:
S: currently active step
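            num_procs: number of parallel steps (emulated processes) handled by the controller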
Returns:
updated step
"""
# if S is done, stop right here
if S.status.done:
return S
stage = S.status.stage
self.logger.debug("Process %2i at stage %s" % (S.status.slot, stage))
if stage == 'SPREAD':
# first stage: spread values
self.hooks.pre_step(step=S, level_number=0)
# call predictor from sweeper
S.levels[0].sweep.predict()
# update stage
if len(S.levels) > 1 and self.params.predict: # MLSDC or PFASST with predict
S.status.stage = 'PREDICT_RESTRICT'
elif len(S.levels) > 1: # MLSDC or PFASST without predict
self.hooks.pre_iteration(step=S, level_number=0)
S.status.stage = 'IT_FINE_SWEEP'
elif num_procs > 1: # MSSDC
self.hooks.pre_iteration(step=S, level_number=0)
S.status.stage = 'IT_COARSE_SWEEP'
elif num_procs == 1: # SDC
self.hooks.pre_iteration(step=S, level_number=0)
S.status.stage = 'IT_FINE_SWEEP'
else:
print("Don't know what to do after spread, aborting")
exit()
return S
elif stage == 'PREDICT_RESTRICT':
# call predictor (serial)
# go to coarsest level via transfer
for l in range(1, len(S.levels)):
S.transfer(source=S.levels[l - 1], target=S.levels[l])
# update stage and return
S.status.stage = 'PREDICT_SWEEP'
return S
elif stage == 'PREDICT_SWEEP':
# do a (serial) sweep on coarsest level
# receive new values from previous step (if not first step)
if not S.status.first:
if S.prev.levels[-1].tag:
self.recv(S.levels[-1], S.prev.levels[-1])
# reset tag to signal successful receive
S.prev.levels[-1].tag = False
# do the sweep with (possibly) new values
S.levels[-1].sweep.update_nodes()
# update stage and return
S.status.stage = 'PREDICT_SEND'
return S
elif stage == 'PREDICT_SEND':
# send updated values on coarsest level
# send new values forward, if previous send was successful (otherwise: try again)
if not S.status.last:
if not S.levels[-1].tag:
self.send(S.levels[-1], tag=True)
else:
S.status.stage = 'PREDICT_SEND'
return S
# decrement counter to determine how many coarse sweeps are necessary
S.status.pred_cnt -= 1
# update stage and return
if S.status.pred_cnt == 0:
S.status.stage = 'PREDICT_INTERP'
else:
S.status.stage = 'PREDICT_SWEEP'
return S
elif stage == 'PREDICT_INTERP':
# prolong back to finest level
for l in range(len(S.levels) - 1, 0, -1):
S.transfer(source=S.levels[l], target=S.levels[l - 1])
# update stage and return
self.hooks.pre_iteration(step=S, level_number=0)
S.status.stage = 'IT_FINE_SWEEP'
return S
elif stage == 'IT_FINE_SWEEP':
# do sweep on finest level
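            # a node failure is emulated right before the fine sweep: with strategy 'NOFAULT' nothing
            # happens, otherwise the lost data of this step is recovered, e.g. by re-spreading
            # initial values (SPREAD*) or by interpolation (INTERP*)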
S = hard_fault_injection(S)
# standard sweep workflow: update nodes, compute residual, log progress
self.hooks.pre_sweep(step=S, level_number=0)
S.levels[0].sweep.update_nodes()
S.levels[0].sweep.compute_residual()
self.hooks.post_sweep(step=S, level_number=0)
# update stage and return
S.status.stage = 'IT_FINE_SEND'
return S
elif stage == 'IT_FINE_SEND':
# send forward values on finest level
# if last send succeeded on this level or if last rank, send new values (otherwise: try again)
if not S.levels[0].tag or S.status.last or S.next.status.done:
self.send(S.levels[0], tag=True)
S.status.stage = 'IT_CHECK'
else:
S.status.stage = 'IT_FINE_SEND'
# return
return S
elif stage == 'IT_CHECK':
# check whether to stop iterating
self.hooks.post_iteration(step=S, level_number=0)
S.status.done = CheckConvergence.check_convergence(S)
# if the previous step is still iterating but I am done, un-do me to still forward values
if not S.status.first and S.status.done and (S.prev.status.done is not None and not S.prev.status.done):
S.status.done = False
# if I am done, signal accordingly, otherwise proceed
if S.status.done:
S.levels[0].sweep.compute_end_point()
self.hooks.post_step(step=S, level_number=0)
S.status.stage = 'DONE'
else:
# increment iteration count here (and only here)
S.status.iter += 1
self.hooks.pre_iteration(step=S, level_number=0)
if len(S.levels) > 1:
S.status.stage = 'IT_UP'
elif num_procs > 1: # MSSDC
S.status.stage = 'IT_COARSE_RECV'
elif num_procs == 1: # SDC
S.status.stage = 'IT_FINE_SWEEP'
# return
return S
elif stage == 'IT_UP':
# go up the hierarchy from finest to coarsest level
S.transfer(source=S.levels[0], target=S.levels[1])
# sweep and send on middle levels (not on finest, not on coarsest, though)
for l in range(1, len(S.levels) - 1):
self.hooks.pre_sweep(step=S, level_number=l)
S.levels[l].sweep.update_nodes()
S.levels[l].sweep.compute_residual()
self.hooks.post_sweep(step=S, level_number=l)
# send if last send succeeded on this level (otherwise: abort with error (FIXME))
if not S.levels[l].tag or S.status.last or S.next.status.done:
self.send(S.levels[l], tag=True)
else:
print('SEND ERROR', l, S.levels[l].tag)
exit()
# transfer further up the hierarchy
S.transfer(source=S.levels[l], target=S.levels[l + 1])
# update stage and return
S.status.stage = 'IT_COARSE_RECV'
return S
elif stage == 'IT_COARSE_RECV':
# receive on coarsest level
# rather complex logic here...
# if I am not the first in line and if the first is not done yet, try to receive
# otherwise: proceed, no receiving possible/necessary
if not S.status.first and not S.prev.status.done:
                # try to receive and then progress (otherwise: try again)
if S.prev.levels[-1].tag:
self.recv(S.levels[-1], S.prev.levels[-1])
S.prev.levels[-1].tag = False
if len(S.levels) > 1 or num_procs > 1:
S.status.stage = 'IT_COARSE_SWEEP'
else:
print('you should not be here')
exit()
else:
S.status.stage = 'IT_COARSE_RECV'
else:
if len(S.levels) > 1 or num_procs > 1:
S.status.stage = 'IT_COARSE_SWEEP'
else:
print('you should not be here')
exit()
# return
return S
elif stage == 'IT_COARSE_SWEEP':
# coarsest sweep
# standard sweep workflow: update nodes, compute residual, log progress
self.hooks.pre_sweep(step=S, level_number=len(S.levels) - 1)
S.levels[-1].sweep.update_nodes()
S.levels[-1].sweep.compute_residual()
self.hooks.post_sweep(step=S, level_number=len(S.levels) - 1)
# update stage and return
S.status.stage = 'IT_COARSE_SEND'
return S
elif stage == 'IT_COARSE_SEND':
# send forward coarsest values
# try to send new values (if old ones have not been picked up yet, retry)
if not S.levels[-1].tag or S.status.last or S.next.status.done:
self.send(S.levels[-1], tag=True)
# update stage
if len(S.levels) > 1: # MLSDC or PFASST
S.status.stage = 'IT_DOWN'
else: # MSSDC
S.status.stage = 'IT_CHECK'
else:
S.status.stage = 'IT_COARSE_SEND'
# return
return S
elif stage == 'IT_DOWN':
            # prolong corrections down to the finest level
# receive and sweep on middle levels (except for coarsest level)
for l in range(len(S.levels) - 1, 0, -1):
# if applicable, try to receive values from IT_UP, otherwise abort (fixme)
if not S.status.first and not S.prev.status.done:
if S.prev.levels[l - 1].tag:
self.recv(S.levels[l - 1], S.prev.levels[l - 1])
S.prev.levels[l - 1].tag = False
else:
print('RECV ERROR DOWN')
exit()
# prolong values
S.transfer(source=S.levels[l], target=S.levels[l - 1])
# on middle levels: do sweep as usual
if l - 1 > 0:
self.hooks.pre_sweep(step=S, level_number=l - 1)
S.levels[l - 1].sweep.update_nodes()
S.levels[l - 1].sweep.compute_residual()
self.hooks.post_sweep(step=S, level_number=l - 1)
# update stage and return
S.status.stage = 'IT_FINE_SWEEP'
return S
else:
# fixme: use meaningful error object here
print('Something is wrong here, you should have hit one case statement!')
exit()
| 11,076 | 36.805461 | 117 | py |
pySDC | pySDC-master/pySDC/projects/deprecated/node_failure/boussinesq_example.py | import numpy as np
import pySDC.projects.deprecated.node_failure.emulate_hard_faults as ft
from pySDC.helpers.stats_helper import get_sorted, filter_stats
from pySDC.implementations.problem_classes.Boussinesq_2D_FD_imex import boussinesq_2d_imex
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.transfer_classes.TransferMesh_NoCoarse import mesh_to_mesh
from pySDC.projects.deprecated.node_failure.controller_nonMPI_hard_faults import controller_nonMPI_hard_faults
# noinspection PyShadowingNames,PyShadowingBuiltins
def main(ft_strategies):
"""
    This routine runs PFASST with emulated node failures and stores the residual data for the heatmaps (residual per step and iteration)
"""
num_procs = 16
# setup parameters "in time"
t0 = 0
Tend = 960
Nsteps = 320
dt = Tend / float(Nsteps)
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-06
level_params['dt'] = dt
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['finter'] = True
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 6
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = 'LU'
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20
# initialize problem parameters
problem_params = dict()
# problem_params['nvars'] = [(4, 450, 30), (4, 450, 30)]
problem_params['nvars'] = [(4, 100, 10), (4, 100, 10)]
problem_params['u_adv'] = 0.02
problem_params['c_s'] = 0.3
problem_params['Nfreq'] = 0.01
problem_params['x_bounds'] = [(-150.0, 150.0)]
problem_params['z_bounds'] = [(0.0, 10.0)]
problem_params['order'] = [4, 2]
problem_params['order_upw'] = [5, 1]
problem_params['gmres_maxiter'] = [50, 50]
problem_params['gmres_restart'] = [10, 10]
problem_params['gmres_tol_limit'] = [1e-10, 1e-10]
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = boussinesq_2d_imex # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer
ft.hard_random = 0.03
controller = controller_nonMPI_hard_faults(
num_procs=num_procs, controller_params=controller_params, description=description
)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
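    # report the advective and acoustic CFL numbers (informational only, no hard stability check here)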
cfl_advection = P.params.u_adv * dt / P.h[0]
cfl_acoustic_hor = P.params.c_s * dt / P.h[0]
cfl_acoustic_ver = P.params.c_s * dt / P.h[1]
print("CFL number of advection: %4.2f" % cfl_advection)
print("CFL number of acoustics (horizontal): %4.2f" % cfl_acoustic_hor)
print("CFL number of acoustics (vertical): %4.2f" % cfl_acoustic_ver)
for strategy in ft_strategies:
print('------------------------------------------ working on strategy ', strategy)
ft.strategy = strategy
        # read in reference data from clean run, will provide reproducible locations for faults
if strategy != 'NOFAULT':
reffile = np.load('data/PFASST_BOUSSINESQ_stats_hf_NOFAULT_P16.npz')
ft.refdata = reffile['hard_stats']
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
P.report_log()
# get residuals of the run
extract_stats = filter_stats(stats, type='residual_post_iteration')
        # find boundaries for x-, y- and c-axis as well as the array sizes
maxprocs = 0
maxiter = 0
minres = 0
maxres = -99
for k, v in extract_stats.items():
maxprocs = max(maxprocs, k.process)
maxiter = max(maxiter, k.iter)
minres = min(minres, np.log10(v))
maxres = max(maxres, np.log10(v))
# grep residuals and put into array
residual = np.zeros((maxiter, maxprocs + 1))
residual[:] = -99
for k, v in extract_stats.items():
step = k.process
iter = k.iter
if iter != -1:
residual[iter - 1, step] = np.log10(v)
# stats magic: get niter (probably redundant with maxiter)
sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')
iter_count = np.zeros(Nsteps)
for item in sortedlist_stats:
iter_count[item[0]] = item[1]
print(iter_count)
np.savez(
'data/PFASST_BOUSSINESQ_stats_hf_' + ft.strategy + '_P' + str(num_procs),
residual=residual,
iter_count=iter_count,
hard_stats=ft.hard_stats,
)
if __name__ == "__main__":
# ft_strategies = ['SPREAD', 'SPREAD_PREDICT', 'INTERP', 'INTERP_PREDICT']
ft_strategies = ['NOFAULT']
main(ft_strategies=ft_strategies)
| 5,580 | 35.960265 | 112 | py |
pySDC | pySDC-master/pySDC/projects/deprecated/node_failure/hard_faults_test.py | import numpy as np
import pySDC.projects.deprecated.node_failure.emulate_hard_faults as ft
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.problem_classes.AdvectionEquation_ND_FD import advectionNd
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_forced
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
from pySDC.projects.deprecated.node_failure.controller_nonMPI_hard_faults import controller_nonMPI_hard_faults
# noinspection PyShadowingNames,PyShadowingBuiltins
def main(ft_setups, ft_strategies):
"""
    This routine sweeps single node failures over all steps and iterations and stores the resulting iteration counts for the heatmaps
"""
num_procs = 16
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-09
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['finter'] = True
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 6
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
for setup in ft_setups:
if setup == 'HEAT':
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = 0.5
problem_params['freq'] = 1
problem_params['nvars'] = [255, 127]
problem_params['bc'] = 'dirichlet-zero'
level_params['dt'] = 0.5
space_transfer_params['periodic'] = False
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heatNd_forced # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
            description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer
# setup parameters "in time"
t0 = 0.0
Tend = 8.0
elif setup == 'ADVECTION':
# initialize problem parameters
problem_params = dict()
problem_params['c'] = 1.0
problem_params['nvars'] = [256, 128]
problem_params['freq'] = 2
problem_params['order'] = 2
problem_params['bc'] = 'periodic' # boundary conditions
level_params['dt'] = 0.125
space_transfer_params['periodic'] = True
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = advectionNd # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
            description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer
# setup parameters "in time"
t0 = 0.0
Tend = 2.0
else:
raise NotImplementedError('setup not implemented')
        # do a reference run without any faults to see how things would look (and to get maxiter/ref_niter)
ft.strategy = 'NOFAULT'
controller = controller_nonMPI_hard_faults(
num_procs=num_procs, controller_params=controller_params, description=description
)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# stats magic: get iteration counts to find maxiter/niter
sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')
ref_niter = max([item[1] for item in sortedlist_stats])
print('Will sweep over %i steps and %i iterations now...' % (num_procs, ref_niter))
# loop over all strategies
for strategy in ft_strategies:
ft_iter = range(1, ref_niter + 1)
ft_step = range(0, num_procs)
print('------------------------------------------ working on strategy ', strategy)
iter_count = np.zeros((len(ft_step), len(ft_iter)))
# loop over all steps
xcnt = -1
for step in ft_step:
xcnt += 1
# loop over all iterations
ycnt = -1
for iter in ft_iter:
ycnt += 1
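                    # configure the emulator to inject exactly one node failure at this
                    # (step, iteration) combination and to recover with the chosen strategy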
ft.hard_step = step
ft.hard_iter = iter
ft.strategy = strategy
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# stats magic: get iteration counts to find maxiter/niter
sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')
niter = max([item[1] for item in sortedlist_stats])
iter_count[xcnt, ycnt] = niter
print(iter_count)
np.savez(
'data/' + setup + '_results_hf_' + strategy,
iter_count=iter_count,
description=description,
ft_step=ft_step,
ft_iter=ft_iter,
)
if __name__ == "__main__":
ft_strategies = ['SPREAD', 'SPREAD_PREDICT', 'INTERP', 'INTERP_PREDICT']
ft_setups = ['ADVECTION', 'HEAT']
# ft_strategies = ['NOFAULT']
# ft_setups = ['HEAT']
main(ft_setups=ft_setups, ft_strategies=ft_strategies)
| 6,806 | 38.12069 | 112 | py |