python_code (string, 0–4.04M chars) | repo_name (string, 8–58 chars) | file_path (string, 5–147 chars) |
---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
import os
import re
from datasets import load_dataset
base_dir = os.environ['BASE_DIR']
def load_dataset_with_name(dataset_name, split):
"""
Takes a dataset name and a split name, and loads the corresponding dataset.
Returns a huggingface dataset dict.
"""
# TODO: Uncomment this line after refactor
# path = base_dir + '/data/' + dataset_name + '/' + split + '_split/'
path = base_dir + '/baseline_replication/TMCD/data/' + dataset_name + '/' + split + '_split/'
data_files = {}
if os.path.exists(path + 'train.tsv'):
data_files["train"] = path + 'train.tsv'
if os.path.exists(path + 'dev.tsv'):
data_files["validation"] = path + 'dev.tsv'
if os.path.exists(path + 'test.tsv'):
data_files["test"] = path + 'test.tsv'
if os.path.exists(path + 'gen.tsv'):
data_files["gen"] = path + 'gen.tsv'
raw_datasets = load_dataset("csv", data_files=data_files, sep='\t', column_names=["input", "output"])
return raw_datasets
def list_datasets_and_their_splits(data_path):
"""
data_path (str): The directory that includes all the dataset files
returns:
dataset_names (list of str)
splits_mapping (dict): keys are dataset names, values are the available splits
"""
avail_datasets = os.listdir(data_path)
dataset_names = []
splits_mapping = dict()
for dir in avail_datasets:
if 'orig' not in dir and '_hp' not in dir:
dataset_names.append(dir)
avail_splits = os.listdir(data_path +'/' + dir)
# Add splits to the dict mapping
for split in avail_splits:
if '_split' in split:
if dir not in splits_mapping:
splits_mapping[dir] = []
splits_mapping[dir].append(re.sub('_split', '', split))
return dataset_names, splits_mapping
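# Usage sketch (illustrative only; the dataset/split names below are assumptions, not
# part of this repo's API):
#   raw = load_dataset_with_name('COGS', 'standard')   # DatasetDict with the splits found on disk
#   names, splits = list_datasets_and_their_splits(base_dir + '/baseline_replication/TMCD/data')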
|
CompGenRep_MLRC2022-main
|
utils/helper_utils/helper_methods.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
import logging
import os
import re
import sys
import json
import torch
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import pdb
import datasets
from datasets import load_dataset, load_metric
from ast import literal_eval
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
AdamW,
Adafactor,
get_scheduler,
)
from transformers.trainer_utils import EvalLoopOutput, EvalPrediction, get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from trainer_seq2seq_sp import SemanticParsingSeq2SeqTrainer
torch.cuda.empty_cache()
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.17.0")
require_version("datasets>=1.8.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
is_tuning: bool = field(
default=False,
metadata={
"help": "Whether we are tuning hyperparameters. "
"If True, the training set will automatically be split to create a validation set."
},
)
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."},
)
max_seq_length: int = field(
default=512,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_output_length: int = field(
default=512,
metadata={
"help": "The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
},
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch (which can "
"be faster on GPU but will be slower on TPU)."
},
)
n_best_size: int = field(
default=20,
metadata={"help": "The total number of n-best predictions to generate when looking for an answer."},
)
num_beams: Optional[int] = field(
default=20,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
def __post_init__(self):
if (
self.dataset_name is None
and self.train_file is None
and self.validation_file is None
and self.test_file is None
):
raise ValueError("Need either a dataset name or a training/validation file/test_file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "tsv"], "`train_file` should be a csv or tsv file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "tsv"], "`validation_file` should be a csv or tsv file."
if self.test_file is not None:
extension = self.test_file.split(".")[-1]
assert extension in ["csv", "tsv"], "`test_file` should be a csv or tsv file."
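# Example invocation (illustrative flags and paths, not prescribed by this repo):
#   python fine_tune_t5.py --model_name_or_path t5-base \
#     --train_file .../train.tsv --validation_file .../dev.tsv \
#     --do_train --do_eval --output_dir /tmp/t5_output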
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f", distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
if data_args.dataset_name == 'scan':
raw_datasets = raw_datasets.rename_column('commands', 'input')
raw_datasets = raw_datasets.rename_column('actions', 'output')
# Temporarily set the validation set to be the test set
raw_datasets["validation"] = raw_datasets["test"]
logger.warning(f"Changed column names of SCAN dataset into {raw_datasets}")
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
if extension == "tsv":
# When extension is tsv, it follows NQG format and will not have column names
raw_datasets = load_dataset("csv", data_files=data_files, sep='\t', column_names=["input", "output"])
else:
raw_datasets = load_dataset(extension, data_files=data_files, sep='\t')
if data_args.is_tuning:
raw_datasets = raw_datasets['train'].train_test_split(test_size=0.1)
raw_datasets['validation'] = raw_datasets['test']
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
)
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
model.resize_token_embeddings(len(tokenizer))
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
# Temporarily set max_answer_length for training.
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
logger.warning(
"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for "
f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
max_answer_length = min(data_args.max_output_length, tokenizer.model_max_length)
def preprocess_function(examples):
inputs = examples['input']
if 't5' in model_args.model_name_or_path or 'COGS' not in data_args.train_file:
inputs = ['semanticparse: ' + x for x in inputs]
else:
inputs = [x for x in inputs]
targets = examples['output']
model_inputs = tokenizer(inputs, max_length=max_seq_length, padding=padding, truncation=True, return_offsets_mapping=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_answer_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
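# For example (illustrative input, not taken from the datasets): with a t5 checkpoint, or
# any non-COGS training file, the source "jump twice" becomes "semanticparse: jump twice";
# a non-T5 model trained on COGS receives the raw source string instead.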
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
# Create train feature from dataset
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
desc="Running tokenizer on train dataset",
)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_examples.map(
preprocess_function,
batched=True,
desc="Running tokenizer on validation dataset",
)
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
metric = load_metric("exact_match")
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids, ignore_case=True, ignore_punctuation=True, regexes_to_ignore=' ')
# Post-processing:
def post_processing_function(
examples: datasets.Dataset, features: datasets.Dataset, outputs: EvalLoopOutput, stage="eval"
):
# Decode the predicted tokens.
preds = outputs.predictions
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds)
decoded_preds = [pred.replace(" β ", "<").replace("<pad> ", "").replace("<pad>", "").replace("</s>", "").replace("<unk>", "<").replace("<s>", "") for pred in decoded_preds]
predictions = []
raw_references = []
# Fix white space
def white_space_fix(text):
return " ".join(text.split())
# Let's loop over all the examples!
for i in range(len(features)):
predictions.append(white_space_fix(decoded_preds[i]))
raw_references.append(white_space_fix(features[i]['output'].replace(" ,", ",")))
# Save predictions
prefix = 'eval'
prediction_file = os.path.join(
training_args.output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
)
logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(predictions, indent=4) + "\n")
# Save ground truth
ground_truth_file = os.path.join(
training_args.output_dir, "golds.json" if prefix is None else f"{prefix}_golds.json"
)
logger.info(f"Saving ground truth to {ground_truth_file}.")
with open(ground_truth_file, "w") as writer:
writer.write(json.dumps(raw_references, indent=4) + "\n")
return EvalPrediction(predictions=predictions, label_ids=raw_references)
# Initialize optimizer and scheduler
if training_args.do_train:
if 't5' in model_args.model_name_or_path:
# optimizer = AdamW(model.parameters(), lr=training_args.learning_rate, weight_decay=0.01)
optimizer = Adafactor(model.parameters(), lr=training_args.learning_rate, relative_step=False)
else:
optimizer = AdamW(model.parameters(), lr=training_args.learning_rate, weight_decay=0.01)
lr_scheduler = get_scheduler('linear', optimizer, num_warmup_steps=0, num_training_steps= training_args.num_train_epochs * (len(train_dataset) // training_args.per_device_train_batch_size))
# Initialize our Trainer
if training_args.do_train:
trainer = SemanticParsingSeq2SeqTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
eval_examples=eval_examples if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
optimizers=(optimizer, lr_scheduler),
# generation_num_beams=data_args.num_beams,
post_process_function=post_processing_function,
# num_beams=data_args.num_beams,
)
else:
trainer = SemanticParsingSeq2SeqTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
eval_examples=eval_examples if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
post_process_function=post_processing_function,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
metrics["train_samples"] = len(train_dataset)
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
results = {}
max_length = (
data_args.max_seq_length
)
num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
if training_args.do_eval and not training_args.do_predict:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix="eval")
max_eval_samples = len(eval_dataset)
metrics["eval_samples"] = len(eval_dataset)
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.do_predict:
res = trainer.predict(eval_dataset)
# Save the prediction files for spider evaluation
prediction_list = []
for pred_idx, pred_id in enumerate(res.predictions):
prediction_list.append(pred_id)
# Output to result dir
base_dir = os.environ["BASE_DIR"]
# Extract the dataset name and split from the validation file path
test_list = data_args.validation_file.split('/')
dataset_name = test_list[test_list.index('data') + 1]
split = test_list[test_list.index('data') + 2].split('_')[0]
if 't5' in model_args.model_name_or_path:
model_name = 't5'
else:
model_name = 'bart'
logger.info("Writing model predictions to txt file...")
with open(base_dir + '/results/predictions/' + dataset_name + '/' + model_name + '_' + split + '.txt', 'w') as f:
for line in prediction_list:
f.write(f"{line}\n")
if __name__ == "__main__":
main()
|
CompGenRep_MLRC2022-main
|
hf_training/fine_tune_t5.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A subclass of `Trainer` specific to Semantic Parsing tasks
"""
from typing import Dict, List, Optional, Union, Tuple, Any
import torch
from torch import nn
from torch.utils.data import Dataset
import transformers
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import pdb
class SemanticParsingSeq2SeqTrainer(Seq2SeqTrainer):
def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
super().__init__(*args, **kwargs)
self.eval_examples = eval_examples
self.post_process_function = post_process_function
# def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
eval_examples=None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
max_length: Optional[int] = None,
num_beams: Optional[int] = None,
) -> Dict[str, float]:
self._max_length = max_length if max_length is not None else self.args.generation_max_length
self._num_beams = num_beams if num_beams is not None else self.args.generation_num_beams
eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_examples = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
)
# pdb.set_trace()
finally:
self.compute_metrics = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
metrics = self.compute_metrics(eval_preds)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
self.log(metrics)
else:
metrics = {}
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
return metrics
def predict(self, predict_dataset, predict_examples=None, ignore_keys=None, metric_key_prefix: str = "test"):
self._max_length = self.args.generation_max_length
self._num_beams = self.args.generation_num_beams
predict_dataloader = self.get_test_dataloader(predict_dataset)
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
predict_examples = self.eval_examples if predict_examples is None else predict_examples
try:
output = eval_loop(
predict_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
)
finally:
self.compute_metrics = compute_metrics
if self.post_process_function is None:
return output
predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=None)
|
CompGenRep_MLRC2022-main
|
hf_training/trainer_seq2seq_sp.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
import sys
import os
import time
import argparse
import json
import random
import shutil
import copy
import pickle
import torch
from torch import cuda
import numpy as np
import time
import logging
from tokenizer import Tokenizer
from utils import *
from torch.nn.init import xavier_uniform_
from torch.nn.utils.rnn import pad_sequence
parser = argparse.ArgumentParser()
parser.add_argument('--resume_from_checkpoint', default=False, type=bool)
parser.add_argument('--nqg_dataset', default=False, type=bool)
parser.add_argument('--train_file', default='data/SCAN/tasks_train_length.txt')
parser.add_argument('--dev_file', default='data/SCAN/tasks_test_simple.txt')
parser.add_argument('--save_path', default='model.pt', help='where to save the model')
parser.add_argument('--min_freq', default=1, type=int)
parser.add_argument('--sent_max_length_x', default=100, type=int)
parser.add_argument('--sent_max_length_y', default=100, type=int)
# Encoder
parser.add_argument('--enc_dim', default=256, type=int)
parser.add_argument('--enc_layers', default=0, type=int)
parser.add_argument('--enc_dropout', default=0.0, type=float)
# Decoder
parser.add_argument('--pt_states', default=0, type=int)
parser.add_argument('--nt_states', default=0, type=int)
parser.add_argument('--src_pt_states', default=1, type=int)
parser.add_argument('--src_nt_states', default=10, type=int)
parser.add_argument('--dec_dim', default=256, type=int)
parser.add_argument('--dec_dropout', default=0.0, type=float)
parser.add_argument('--dec_layers', default=3, type=int)
parser.add_argument('--dec_nt_span_min', default=2, type=int)
parser.add_argument('--dec_nt_span_max', default=1000, type=int)
parser.add_argument('--dec_pt_span_min', default=1, type=int)
parser.add_argument('--dec_pt_span_max', default=1, type=int)
parser.add_argument('--rule_constraint_type', default=1, type=int)
# Parser
parser.add_argument('--parser_pt_states', default=20, type=int)
parser.add_argument('--parser_nt_states', default=20, type=int)
parser.add_argument('--parser_dim', default=256, type=int)
# Optimization
parser.add_argument('--num_epochs', default=15, type=int, help='number of training epochs')
parser.add_argument('--lr', default=5e-4, type=float, help='starting learning rate')
parser.add_argument('--weight_decay', default=1e-5, type=float, help='l2 weight decay')
parser.add_argument('--max_grad_norm', default=3, type=float, help='gradient clipping parameter')
parser.add_argument('--beta1', default=0.75, type=float, help='beta1 for adam')
parser.add_argument('--beta2', default=0.999, type=float, help='beta2 for adam')
parser.add_argument('--gpu', default=0, type=int, help='which gpu to use')
parser.add_argument('--seed', default=17, type=int, help='random seed')
parser.add_argument('--print_every', type=int, default=1000, help='print stats after N examples')
parser.add_argument('--print_trees', type=int, default=1, help='print trees')
parser.add_argument('--eval_every', type=int, default=1000, help='eval on dev set after N examples')
parser.add_argument('--update_every', type=int, default=4, help='grad update after N examples')
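# Example invocation (illustrative; the file names follow the defaults above):
#   python train_scan.py --train_file data/SCAN/tasks_train_length.txt \
#     --dev_file data/SCAN/tasks_test_simple.txt --save_path scan_model.pt --gpu 0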
import pdb
def get_data(data_file):
data = []
for d in open(data_file, "r"):
src, tgt = d.split("IN: ")[1].split(" OUT: ")
src = src.strip().split()
tgt = tgt.strip().split()
if len(src) == 1 or len(tgt) == 1:
src = src + src
tgt = tgt + tgt
data.append({"src": src, "tgt": tgt})
return data
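# For reference, get_data parses the original SCAN txt format, e.g. the line
#   "IN: jump twice OUT: I_JUMP I_JUMP"
# becomes {"src": ["jump", "twice"], "tgt": ["I_JUMP", "I_JUMP"]}.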
def get_other_data(data_file, sent_max_length_x, sent_max_length_y):
"""
Added: loads two-column tsv files (NQG data format) instead of the SCAN txt format.
"""
data = []
num_data_removed = 0
for d in open(data_file, "r"):
src, tgt = d.split("\t")
src = src.strip().split()
tgt = tgt.strip().split()
# Testing; otherwise it will lead to OOM
if len(src) == 1 or len(tgt) == 1:
src = src + src
tgt = tgt + tgt
# Truncate instances that are too long
if len(tgt) > sent_max_length_y:
tgt = tgt[:sent_max_length_y]
if len(src) > sent_max_length_x:
src = src[:sent_max_length_x]
data.append({"src": src, "tgt": tgt})
return data
def main(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
cuda.set_device(args.gpu)
device = torch.device("cuda:"+str(args.gpu))
if args.nqg_dataset:
train_data = get_other_data(args.train_file, args.sent_max_length_x, args.sent_max_length_y)
# if "COGS" in args.train_file:
# val_data = get_other_data(args.dev_file)
else:
train_data = get_data(args.train_file)
val_data = get_data(args.dev_file)
x_tokenizer = Tokenizer()
x_tokenizer.train([d["src"] for d in train_data])
y_tokenizer = Tokenizer()
y_tokenizer.train([d["tgt"] for d in train_data])
from models import BinaryTreeLSTM as Encoder
from models import NeuralQCFG as Decoder
from models import NeuralPCFG as Parser
encoder = Encoder(vocab = len(x_tokenizer.vocab2idx),
dim = args.enc_dim,
dropout = args.enc_dropout,
layers = args.enc_layers)
decoder = Decoder(vocab = len(y_tokenizer.vocab2idx),
dim = args.dec_dim,
num_layers = args.dec_layers,
pt_states = args.pt_states,
nt_states = args.nt_states,
src_dim = args.enc_dim,
src_pt_states = args.src_pt_states,
src_nt_states = args.src_nt_states,
dropout = args.dec_dropout,
rule_constraint_type = args.rule_constraint_type,
nt_span_range = [args.dec_nt_span_min, args.dec_nt_span_max],
pt_span_range = [args.dec_pt_span_min, args.dec_pt_span_max])
parser = Parser(vocab = len(x_tokenizer.vocab2idx),
dim = args.parser_dim,
nt_states = args.parser_nt_states,
pt_states = args.parser_pt_states)
if args.resume_from_checkpoint:
model_checkpoint = torch.load(args.save_path)
encoder = model_checkpoint["encoder"]
decoder = model_checkpoint["decoder"]
parser = model_checkpoint["parser"]
x_tokenizer = model_checkpoint["x_tokenizer"]
y_tokenizer = model_checkpoint["y_tokenizer"]
model_args = model_checkpoint["args"]
encoder.to(device)
decoder.to(device)
parser.to(device)
model = torch.nn.ModuleList([encoder, decoder, parser])
for m in [encoder, decoder, parser]:
for name, param in m.named_parameters():
if param.dim() > 1:
xavier_uniform_(param)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
betas = (args.beta1, args.beta2),
weight_decay = args.weight_decay)
best_val_ppl = 1e5
epoch = 0
b = 0
model.to(device)
torch.autograd.set_detect_anomaly(True)
model.train()
while epoch < args.num_epochs:
start_time = time.time()
epoch += 1
print('Starting epoch: %d' % (epoch))
train_nll = 0.
src_nll = 0.
tgt_nll = 0.
num_sents = 0.
num_src_words = 0.
num_words = 0.
random.shuffle(train_data)
# Number of instances with no result because their output is too long,
# which will lead to OOM
num_no_res = 0
for d in train_data:
b += 1
x = [d["src"]]
y = [d["tgt"]]
x_tensor, _, _ = x_tokenizer.convert_batch(x)
y_tensor, _, _ = y_tokenizer.convert_batch(y)
x_tensor, y_tensor = x_tensor.to(device), y_tensor.to(device)
x_lengths = torch.Tensor([len(d["src"])]).long().to(device)
y_lengths = torch.Tensor([len(d["tgt"])]).long().to(device)
# # Added because of OOM
# if y_lengths[0] > 50:
# num_no_res += 1
# # num_examples += 1
# continue
# print(y_lengths, x_tensor, x_lengths, d["src"])
parse_sample, parse_argmax, parse_log_prob, parse_actions, parse_nll = parser(
x_tensor, x_lengths)
node_features, node_spans = encoder(x_tensor, x_lengths, spans = parse_sample)
nll = decoder(y_tensor, y_lengths, node_features, node_spans,
x_str = y, argmax=False)
dec_loss = nll.mean()
(dec_loss / args.update_every).backward()
train_nll += nll.sum().item()
with torch.no_grad():
node_features_argmax, node_spans_argmax = encoder(x_tensor, x_lengths,
spans = parse_argmax)
nll_argmax = decoder(y_tensor, y_lengths, node_features_argmax, node_spans_argmax,
x_str = y, argmax=False)
neg_reward = (nll - nll_argmax).detach().item()
obj = (neg_reward*parse_log_prob).mean() + parse_nll.mean()
(obj / args.update_every).backward()
src_nll += parse_nll.sum().item()
if b % args.update_every == 0:
torch.nn.utils.clip_grad_norm_(parser.parameters(), args.max_grad_norm)
torch.nn.utils.clip_grad_norm_(encoder.parameters(), args.max_grad_norm)
torch.nn.utils.clip_grad_norm_(decoder.parameters(), args.max_grad_norm)
optimizer.step()
optimizer.zero_grad()
num_sents += 1
num_words += y_lengths.sum().item()
num_src_words += x_lengths.sum().item()
if b % args.print_every == 0:
enc_param_norm = sum([p.norm()**2 for p in encoder.parameters()]).item()**0.5
dec_param_norm = sum([p.norm()**2 for p in decoder.parameters()]).item()**0.5
parser_param_norm = sum([p.norm()**2 for p in parser.parameters()]).item()**0.5
log_str = 'Epoch: %d, Batch: %d/%d, |EncParam|: %.4f, |DecParam|: %.4f, ' + \
'|SrcParserParam|: %.4f, LR: %.4f, SrcPPL: %.4f, ' + \
'PPL: %.4f, ValPPL: %.4f, ' + \
'Throughput: %.2f examples/sec'
print("-"*80)
print(log_str %
(epoch, b, len(train_data),
enc_param_norm, dec_param_norm, parser_param_norm,
args.lr, np.exp(src_nll / num_src_words),
np.exp(train_nll / num_words), best_val_ppl,
num_sents / (time.time() - start_time)))
print("-"*80)
if args.print_trees == 1:
print("")
with torch.no_grad():
y_tree, all_spans, all_spans_node = decoder(
y_tensor, y_lengths, node_features, node_spans,
x_str = y, argmax=True)
x_str = [x_tokenizer.idx2vocab[idx] for idx in x_tensor[0].tolist()]
y_str = [y_tokenizer.idx2vocab[idx] for idx in y_tensor[0].tolist()]
x_length = x_lengths[0].item()
y_length = y_lengths[0].item()
print("Source: %s\nTarget: %s" % (" ".join(x_str), " ".join(y_str)))
print("")
print("Source Tree: %s" % get_tree(parse_actions[0], x_str))
action = get_actions(y_tree[0])
print("QCFG Tree: %s" % get_tree(action, y_str))
print("")
for span, span_node in zip(all_spans[0], all_spans_node[0]):
if span_node[0] == -1:
if span[0] == span[1]:
x_span = "T" + str(span_node[2])
else:
x_span = "NT" + str(span_node[2])
else:
x_span = " ".join(x_str[span_node[0]:span_node[1]+1])
y_span = " ".join(y_str[span[0]:span[1]+1])
if span[0] == span[1]:
denom = len(decoder.pt_spans[0])
else:
denom = len(decoder.nt_spans[0])
print((y_span, x_span, "N" + str(span[2] // denom)))
if b % args.eval_every == 0 and epoch > 1 and not args.nqg_dataset:
print('--------------------------------')
print('Checking validation perf...')
if not args.nqg_dataset:
# if not args.nqg_dataset or "COGS" in args.train_file:
val_ppl = eval(val_data, encoder, decoder, parser, device,
x_tokenizer, y_tokenizer)
print('--------------------------------')
if val_ppl < best_val_ppl:
best_val_ppl = val_ppl
checkpoint = {
'args': args.__dict__,
'encoder': encoder.cpu(),
'decoder': decoder.cpu(),
'parser': parser.cpu(),
'x_tokenizer': x_tokenizer,
'y_tokenizer': y_tokenizer,
}
print('Saving checkpoint to %s' % args.save_path)
torch.save(checkpoint, args.save_path)
model.to(device)
if best_val_ppl < 1.01:
assert False
if args.nqg_dataset:
# if args.nqg_dataset or "COGS" not in args.train_file:
# No dev set, directly save the final checkpoint
checkpoint = {
'args': args.__dict__,
'encoder': encoder.cpu(),
'decoder': decoder.cpu(),
'parser': parser.cpu(),
'x_tokenizer': x_tokenizer,
'y_tokenizer': y_tokenizer,
}
print('Saving checkpoint to %s' % args.save_path)
torch.save(checkpoint, args.save_path)
model.to(device)
print("Number of too long examples: ", num_no_res)
def eval(data, encoder, decoder, parser, device, x_tokenizer, y_tokenizer):
encoder.eval()
decoder.eval()
parser.eval()
num_sents = 0
num_words = 0
total_nll = 0.
b = 0
for d in data:
if any([s not in x_tokenizer.vocab2idx for s in d["src"]]) or \
any([s not in y_tokenizer.vocab2idx for s in d["tgt"]]):
continue
b += 1
x = [d["src"]]
y = [d["tgt"]]
x_tensor, _, _ = x_tokenizer.convert_batch(x)
y_tensor, _, _ = y_tokenizer.convert_batch(y)
x_tensor, y_tensor = x_tensor.to(device), y_tensor.to(device)
x_lengths = torch.Tensor([len(d["src"])]).long().to(device)
y_lengths = torch.Tensor([len(d["tgt"])]).long().to(device)
parse_nll, parse_argmax, _ = parser.forward_nll_argmax(x_tensor, x_lengths)
with torch.no_grad():
node_features, node_spans = encoder(x_tensor, x_lengths, spans = parse_argmax)
new_spans = []
for span, x_str in zip(node_spans, x):
new_span = []
for s in span:
new_span.append([s[0], s[1], x_str[s[0]:s[1]+1]])
new_spans.append(new_span)
node_spans = new_spans
nll = decoder(y_tensor, y_lengths, node_features, node_spans,
x_str = y, argmax=False)
total_nll += nll.sum().item()
num_words += y_lengths.sum().item()
ppl = np.exp(total_nll / num_words)
print('PPL: %.4f' % ppl)
encoder.train()
decoder.train()
parser.train()
return ppl
if __name__ == '__main__':
start_time = time.time()
args = parser.parse_args()
main(args)
print("--- %s seconds ---" % (time.time() - start_time))
# Adding set_trace to force slurm to output the log
pdb.set_trace()
|
CompGenRep_MLRC2022-main
|
baseline_replication/neural-qcfg/train_scan.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
### Convert COGS tsv data (input, output, type) to the two-column tsv format used by NQG
import re
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string("tsv", "", "Input tsv file.")
flags.DEFINE_string("output", "", "Output tsv file.")
def main(unused_argv):
with open(FLAGS.tsv, 'r') as tsv_file:
with open(FLAGS.output, 'w') as csv_file:
for line in tsv_file:
# remove the type column
csv_file.write(line.split("\t")[0] + "\t" + line.split("\t")[1] + "\n")
print("Writing done")
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/COGS/convert_to_nqg_format.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
### Convert tsv data to csv format for transformers training
import re
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string("tsv", "", "Input tsv file.")
flags.DEFINE_string("csv", "", "Output csv file.")
def main(unused_argv):
with open(FLAGS.tsv, 'r') as tsv_file:
with open(FLAGS.csv, 'w') as csv_file:
csv_file.write('input\toutput\ttype\n')
for line in tsv_file:
csv_file.write(line)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/COGS/convert_to_csv.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
### Convert tsv data to csv format for transformers training
import re
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string("tsv", "", "Input tsv file.")
flags.DEFINE_string("csv", "", "Output csv file.")
def main(unused_argv):
with open(FLAGS.tsv, 'r') as tsv_file:
with open(FLAGS.csv, 'w') as csv_file:
csv_file.write('input\toutput\n')
for line in tsv_file:
csv_file.write(line)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/convert_to_csv.py
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/__init__.py
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strip targets from a tsv file and write as newline-separated txt.
This file can be useful as input to generate predictions (e.g. for evaluation).
"""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
import pdb
from tasks import tsv_utils
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string("output", "", "Output txt file.")
flags.DEFINE_string("prefix", "", "Optional prefix to prepend to source.")
def main(unused_argv):
examples = tsv_utils.read_tsv(FLAGS.input)
with gfile.GFile(FLAGS.output, "w") as txt_file:
for example in examples:
txt_file.write("%s%s\n" % (FLAGS.prefix, example[0]))
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/strip_targets.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Split tsv dataset file based on predefined sets of example ids."""
import json
import os
import sys
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string("split", "", "Json split file.")
flags.DEFINE_string("output_dir", "", "Output directory for dataset files.")
def load_splits():
"""Reads a JSON file containing split IDs.
Returns:
A dictionary where keys are a split name (e.g. `train` or `test`) and values
are a list of integer example IDs.
"""
with gfile.GFile(FLAGS.split, "r") as reader:
text = reader.read()
splits = json.loads(text)
return splits
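# Example split file contents (illustrative IDs only):
#   {"train": [0, 2, 5, ...], "test": [1, 3, 4, ...]}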
def main(unused_argv):
splits = load_splits()
examples = tsv_utils.read_tsv(FLAGS.input)
example_id_to_example = {
example_id: example for example_id, example in enumerate(examples)
}
for split, split_ids in splits.items():
examples = []
for split_id in split_ids:
examples.append(example_id_to_example[split_id])
filename = os.path.join(FLAGS.output_dir, "%s.tsv" % split)
tsv_utils.write_tsv(examples, filename)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/split_dataset.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Utilities for reading and writing files.
Expected format for a TSV file is that each line has one example, with each
element separated by \t. The number of elements should be the same as
expected_num_columns.
Expected format for examples in memory is a list where each element is:
(element_1, element_2, ...), or [element_1, element_2, ...]
The number of elements should be the same as expected_num_columns.
"""
from tensorflow.io import gfile
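# For example (illustrative), the TSV line "jump twice\tI_JUMP I_JUMP" is read by
# read_tsv as the two-column example ["jump twice", "I_JUMP I_JUMP"].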
def read_tsv(filename, expected_num_columns=2):
"""Read file to list of examples."""
examples = []
with gfile.GFile(filename, "r") as tsv_file:
for line in tsv_file:
line = line.rstrip()
cols = line.split("\t")
if len(cols) != expected_num_columns:
raise ValueError("Line '%s' has %s columns (%s)" %
(line, len(cols), cols))
examples.append(cols)
print("Loaded %s examples from %s." % (len(examples), filename))
return examples
def write_tsv(examples, filename, expected_num_columns=2):
"""Write examples to tsv file."""
with gfile.GFile(filename, "w") as tsv_file:
for example in examples:
if len(example) != expected_num_columns:
raise ValueError("Example '%s' has %s columns." %
(example, len(example)))
example = "\t".join(example)
line = "%s\n" % example
tsv_file.write(line)
print("Wrote %s examples to %s." % (len(examples), filename))
def merge_shared_tsvs(filename):
"""Merge multiple sharded tsv files into one."""
output_files = gfile.glob("%s-*-of-*" % filename)
all_examples = []
for output_file in output_files:
examples = read_tsv(output_file)
all_examples.extend(examples)
write_tsv(all_examples, filename)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/tsv_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert SCAN txt format to standard TSV format."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input txt file.")
flags.DEFINE_string("output", "", "Output tsv file.")
def load_examples(filename):
"""Load SCAN examples from original data file."""
examples = []
with gfile.GFile(filename, "r") as input_file:
for line in input_file:
splits = line.split("OUT:")
# Trim "IN:" prefix.
input_string = splits[0][3:].strip()
output_string = splits[1].strip()
examples.append((input_string, output_string))
return examples
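# e.g. (illustrative) the SCAN line "IN: jump twice OUT: I_JUMP I_JUMP" is converted
# into the tsv example ("jump twice", "I_JUMP I_JUMP").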
def main(unused_argv):
examples = load_examples(FLAGS.input)
tsv_utils.write_tsv(examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/scan/convert_to_tsv.py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocesses a specific split of the CFQ dataset."""
from absl import app
from absl import flags
import preprocess as preprocessor
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', None,
'Name of the TFDS dataset. Use cfq or scan.')
flags.DEFINE_string('split', None, 'Name of the split, or of the JSON file containing '
'split information.')
flags.DEFINE_string('save_path', None, 'Path to the directory where to '
'save the files to.')
flags.mark_flag_as_required('save_path')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
dataset = preprocessor.get_dataset_from_tfds(FLAGS.dataset, FLAGS.split)
preprocessor.write_dataset(dataset, FLAGS.save_path)
token_vocab = preprocessor.get_token_vocab(FLAGS.save_path)
preprocessor.write_token_vocab(token_vocab, FLAGS.save_path)
if __name__ == '__main__':
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/scan/preprocess_main.py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for preprocessing the CFQ dataset."""
import collections
import os
import string
from typing import Any, Dict, List, Tuple
from absl import logging
from tensorflow.compat.v1.io import gfile
import tensorflow_datasets as tfds
Dataset = Dict[str, List[Tuple[str, str]]]
def tokenize_punctuation(text):
text = map(lambda c: ' %s ' % c if c in string.punctuation else c, text)
return ' '.join(''.join(text).split())
def preprocess_sparql(query):
"""Do various preprocessing on the SPARQL query."""
# Tokenize braces.
query = query.replace('count(*)', 'count ( * )')
tokens = []
for token in query.split():
# Replace 'ns:' prefixes.
if token.startswith('ns:'):
token = token[3:]
# Replace mid prefixes.
if token.startswith('m.'):
token = 'm_' + token[2:]
tokens.append(token)
return ' '.join(tokens).replace('\\n', ' ')
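# Worked example (illustrative question/query, not taken from the CFQ data):
#   tokenize_punctuation("Who directed M0?")  ->  "Who directed M0 ?"
#   preprocess_sparql("SELECT count(*) WHERE { ns:m.0abc ns:film.director.film M0 }")
#     ->  "SELECT count ( * ) WHERE { m_0abc film.director.film M0 }"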
def get_encode_decode_pair(sample):
# Apply some simple preprocessing on the tokenization, which improves the
# performance of the models significantly.
encode_text = tokenize_punctuation(sample['questionPatternModEntities'])
decode_text = preprocess_sparql(sample['sparqlPatternModEntities'])
return (encode_text, decode_text)
def get_dataset_from_tfds(dataset, split):
"""Load dataset from TFDS and do some basic preprocessing."""
logging.info('Loading dataset via TFDS.')
allsplits = tfds.load(dataset + '/' + split, as_supervised=True)
if 'validation' in allsplits:
# CFQ and divergence splits of StarCFQ have all three sets.
split_names = {'train': 'train', 'dev': 'validation', 'test': 'test'}
else:
# Scan and non-divergence splits of StarCFQ have 'train' and 'test' sets
# only. We simply output the test set as both dev and test. We only really
# use the dev set but t2t-datagen expects all three.
split_names = {'train': 'train', 'dev': 'test', 'test': 'test'}
dataset = collections.defaultdict(list)
for cfq_split_name, tfds_split_name in split_names.items():
for raw_x, raw_y in tfds.as_numpy(allsplits[tfds_split_name]):
encode_decode_pair = (tokenize_punctuation(raw_x.decode()),
preprocess_sparql(raw_y.decode()))
dataset[cfq_split_name].append(encode_decode_pair)
size_str = ', '.join(f'{s}={len(dataset[s])}' for s in split_names)
logging.info('Finished loading splits. Size: %s', size_str)
return dataset
def write_dataset(dataset, save_path):
"""Saves the given dataset into the given location."""
if not dataset:
logging.info('No dataset to write.')
return
logging.info('Writing dataset to %s', save_path)
for split_name, list_of_input_output_pairs in dataset.items():
folder_name = os.path.join(save_path, split_name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
encode_name = os.path.join(folder_name, '%s_encode.txt' % split_name)
decode_name = os.path.join(folder_name, '%s_decode.txt' % split_name)
with gfile.GFile(encode_name,
'w') as encode_f, gfile.GFile(decode_name,
'w') as decode_f:
for pair in list_of_input_output_pairs:
encode_f.write(pair[0] + '\n')
decode_f.write(pair[1] + '\n')
logging.info('Dataset written to %s', save_path)
def write_token_vocab(words,
save_path,
problem = 'cfq'):
"""Writes token vocabulary from @words to @save_path."""
# Sort tokens by frequency and then lexically to break ties.
words_with_counts = words.most_common()
words_with_counts.sort(key=lambda x: (x[1], x[0]), reverse=True)
vocab_path = os.path.join(save_path, 'vocab.%s.tokens' % problem)
with gfile.GFile(vocab_path, 'w') as f:
# Tensor2tensor needs these additional tokens.
f.write('<pad>\n<EOS>\n<OOV>\n')
for word, _ in words_with_counts:
f.write(word + '\n')
logging.info('Token vocabulary written to %s (%s distinct tokens).',
vocab_path, len(words))
def get_lines(path, filename):
with gfile.GFile(os.path.join(path, 'train', filename)) as f:
lines = [l.strip() for l in f.readlines() if l.strip()]
return lines
def get_token_vocab(path):
words = collections.Counter()
lines = get_lines(path, 'train_encode.txt')
lines.extend(get_lines(path, 'train_decode.txt'))
for line in lines:
words.update(line.split(' '))
return words
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/scan/preprocess.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Join source and target text files generated for MCD splits to TSV."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("source", "", "Source txt file.")
flags.DEFINE_string("target", "", "Target txt file.")
flags.DEFINE_string("output", "", "Joined tsv file.")
def read_examples(source_file, target_file):
"""Return list of (source, target) tuples."""
sources = []
targets = []
with gfile.GFile(source_file, "r") as txt_file:
for line in txt_file:
sources.append(line.rstrip("\n"))
with gfile.GFile(target_file, "r") as txt_file:
for line in txt_file:
targets.append(line.rstrip("\n"))
examples = list(zip(sources, targets))
return examples
def main(unused_argv):
examples = read_examples(FLAGS.source, FLAGS.target)
tsv_utils.write_tsv(examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/scan/join_txt_to_tsv.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run SQL parser on dataset to verify all targets have exactly 1 parse."""
from absl import app
from absl import flags
from language.compgen.nqg.tasks import tsv_utils
from language.compgen.nqg.tasks.spider import sql_parser
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_integer("offset", 0, "Example index to start at. Ignored if 0.")
flags.DEFINE_integer("limit", 0, "Example index to stop at. Ignored if 0.")
def main(unused_argv):
examples = tsv_utils.read_tsv(FLAGS.input)
for idx, (_, target) in enumerate(examples):
if FLAGS.offset and idx < FLAGS.offset:
continue
if FLAGS.limit and idx >= FLAGS.limit:
break
print("Processing example %s." % idx)
try:
_ = sql_parser.parse_sql(target)
except ValueError as e:
print(e)
# Retry parsing with verbose debugging.
_ = sql_parser.parse_sql(target, verbose=True)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/sql_parser_main.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pre-tokenize dataset for NQG which uses space-separated tokenization.
Input should be a TSV file, e.g. generated by applying `split_dataset.py` to
the output of `spider/write_dataset.py`.
"""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tasks.spider import nqg_tokenization
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string("output", "", "Output tsv file.")
def main(unused_argv):
examples = tsv_utils.read_tsv(FLAGS.input)
new_examples = []
for source, target in examples:
new_examples.append((nqg_tokenization.process_source(source),
nqg_tokenization.process_target(target)))
tsv_utils.write_tsv(new_examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/nqg_preprocess.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
################################
# val: number(float)/string(str)/sql(dict)
# col_unit: (agg_id, col_id, isDistinct(bool))
# val_unit: (unit_op, col_unit1, col_unit2)
# table_unit: (table_type, col_unit/sql)
# cond_unit: (not_op, op_id, val_unit, val1, val2)
# condition: [cond_unit1, 'and'/'or', cond_unit2, ...]
# sql {
# 'select': (isDistinct(bool), [(agg_id, val_unit), (agg_id, val_unit), ...])
# 'from': {'table_units': [table_unit1, table_unit2, ...], 'conds': condition}
# 'where': condition
# 'groupBy': [col_unit1, col_unit2, ...]
# 'orderBy': ('asc'/'desc', [val_unit1, val_unit2, ...])
# 'having': condition
# 'limit': None/limit value
# 'intersect': None/sql
# 'except': None/sql
# 'union': None/sql
# }
################################
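# Illustrative sketch, not part of the original script: for a hypothetical
# schema with a "singer" table (columns "name" and "age"), the parse of
# "SELECT name FROM singer WHERE age > 20" would roughly be
# {
#   'select': (False, [(0, (0, (0, '__singer.name__', False), None))]),
#   'from': {'table_units': [('table_unit', '__singer__')], 'conds': []},
#   'where': [(False, 3, (0, (0, '__singer.age__', False), None), 20.0, None)],
#   'groupBy': [], 'having': [], 'orderBy': [], 'limit': None,
#   'intersect': None, 'except': None, 'union': None,
# }
# where agg_id 0 is 'none' and op_id 3 is '>' per AGG_OPS/WHERE_OPS below.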
from __future__ import print_function
import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
# Flag to disable value evaluation
DISABLE_VALUE = True
# Flag to disable distinct in select evaluation
DISABLE_DISTINCT = True
CLAUSE_KEYWORDS = ('select', 'from', 'where', 'group', 'order', 'limit', 'intersect', 'union', 'except')
JOIN_KEYWORDS = ('join', 'on', 'as')
WHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')
UNIT_OPS = ('none', '-', '+', "*", '/')
AGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')
TABLE_TYPE = {
'sql': "sql",
'table_unit': "table_unit",
}
COND_OPS = ('and', 'or')
SQL_OPS = ('intersect', 'union', 'except')
ORDER_OPS = ('desc', 'asc')
HARDNESS = {
"component1": ('where', 'group', 'order', 'limit', 'join', 'or', 'like'),
"component2": ('except', 'union', 'intersect')
}
def condition_has_or(conds):
return 'or' in conds[1::2]
def condition_has_like(conds):
return WHERE_OPS.index('like') in [cond_unit[1] for cond_unit in conds[::2]]
def condition_has_sql(conds):
for cond_unit in conds[::2]:
val1, val2 = cond_unit[3], cond_unit[4]
if val1 is not None and type(val1) is dict:
return True
if val2 is not None and type(val2) is dict:
return True
return False
def val_has_op(val_unit):
return val_unit[0] != UNIT_OPS.index('none')
def has_agg(unit):
return unit[0] != AGG_OPS.index('none')
def accuracy(count, total):
if count == total:
return 1
return 0
def recall(count, total):
if count == total:
return 1
return 0
def F1(acc, rec):
if (acc + rec) == 0:
return 0
return (2. * acc * rec) / (acc + rec)
def get_scores(count, pred_total, label_total):
if pred_total != label_total:
return 0,0,0
elif count == pred_total:
return 1,1,1
return 0,0,0
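# Note added for clarity: component scoring is all-or-nothing, e.g.
#   get_scores(count=2, pred_total=2, label_total=2) -> (1, 1, 1)
#   get_scores(count=1, pred_total=2, label_total=2) -> (0, 0, 0)
#   get_scores(count=1, pred_total=1, label_total=2) -> (0, 0, 0)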
def eval_sel(pred, label):
pred_sel = pred['select'][1]
label_sel = label['select'][1]
label_wo_agg = [unit[1] for unit in label_sel]
pred_total = len(pred_sel)
label_total = len(label_sel)
cnt = 0
cnt_wo_agg = 0
for unit in pred_sel:
if unit in label_sel:
cnt += 1
label_sel.remove(unit)
if unit[1] in label_wo_agg:
cnt_wo_agg += 1
label_wo_agg.remove(unit[1])
return label_total, pred_total, cnt, cnt_wo_agg
def eval_where(pred, label):
pred_conds = [unit for unit in pred['where'][::2]]
label_conds = [unit for unit in label['where'][::2]]
label_wo_agg = [unit[2] for unit in label_conds]
pred_total = len(pred_conds)
label_total = len(label_conds)
cnt = 0
cnt_wo_agg = 0
for unit in pred_conds:
if unit in label_conds:
cnt += 1
label_conds.remove(unit)
if unit[2] in label_wo_agg:
cnt_wo_agg += 1
label_wo_agg.remove(unit[2])
return label_total, pred_total, cnt, cnt_wo_agg
def eval_group(pred, label):
pred_cols = [unit[1] for unit in pred['groupBy']]
label_cols = [unit[1] for unit in label['groupBy']]
pred_total = len(pred_cols)
label_total = len(label_cols)
cnt = 0
pred_cols = [pred.split(".")[1] if "." in pred else pred for pred in pred_cols]
label_cols = [label.split(".")[1] if "." in label else label for label in label_cols]
for col in pred_cols:
if col in label_cols:
cnt += 1
label_cols.remove(col)
return label_total, pred_total, cnt
def eval_having(pred, label):
pred_total = label_total = cnt = 0
if len(pred['groupBy']) > 0:
pred_total = 1
if len(label['groupBy']) > 0:
label_total = 1
pred_cols = [unit[1] for unit in pred['groupBy']]
label_cols = [unit[1] for unit in label['groupBy']]
if pred_total == label_total == 1 \
and pred_cols == label_cols \
and pred['having'] == label['having']:
cnt = 1
return label_total, pred_total, cnt
def eval_order(pred, label):
pred_total = label_total = cnt = 0
if len(pred['orderBy']) > 0:
pred_total = 1
if len(label['orderBy']) > 0:
label_total = 1
if len(label['orderBy']) > 0 and pred['orderBy'] == label['orderBy'] and \
((pred['limit'] is None and label['limit'] is None) or (pred['limit'] is not None and label['limit'] is not None)):
cnt = 1
return label_total, pred_total, cnt
def eval_and_or(pred, label):
pred_ao = pred['where'][1::2]
label_ao = label['where'][1::2]
pred_ao = set(pred_ao)
label_ao = set(label_ao)
if pred_ao == label_ao:
return 1,1,1
return len(pred_ao),len(label_ao),0
def get_nestedSQL(sql):
nested = []
for cond_unit in sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]:
if type(cond_unit[3]) is dict:
nested.append(cond_unit[3])
if type(cond_unit[4]) is dict:
nested.append(cond_unit[4])
if sql['intersect'] is not None:
nested.append(sql['intersect'])
if sql['except'] is not None:
nested.append(sql['except'])
if sql['union'] is not None:
nested.append(sql['union'])
return nested
def eval_nested(pred, label):
label_total = 0
pred_total = 0
cnt = 0
if pred is not None:
pred_total += 1
if label is not None:
label_total += 1
if pred is not None and label is not None:
cnt += Evaluator().eval_exact_match(pred, label)
return label_total, pred_total, cnt
def eval_IUEN(pred, label):
lt1, pt1, cnt1 = eval_nested(pred['intersect'], label['intersect'])
lt2, pt2, cnt2 = eval_nested(pred['except'], label['except'])
lt3, pt3, cnt3 = eval_nested(pred['union'], label['union'])
label_total = lt1 + lt2 + lt3
pred_total = pt1 + pt2 + pt3
cnt = cnt1 + cnt2 + cnt3
return label_total, pred_total, cnt
def get_keywords(sql):
res = set()
if len(sql['where']) > 0:
res.add('where')
if len(sql['groupBy']) > 0:
res.add('group')
if len(sql['having']) > 0:
res.add('having')
if len(sql['orderBy']) > 0:
res.add(sql['orderBy'][0])
res.add('order')
if sql['limit'] is not None:
res.add('limit')
if sql['except'] is not None:
res.add('except')
if sql['union'] is not None:
res.add('union')
if sql['intersect'] is not None:
res.add('intersect')
# or keyword
ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
if len([token for token in ao if token == 'or']) > 0:
res.add('or')
cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
# not keyword
if len([cond_unit for cond_unit in cond_units if cond_unit[0]]) > 0:
res.add('not')
# in keyword
if len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('in')]) > 0:
res.add('in')
# like keyword
if len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like')]) > 0:
res.add('like')
return res
def eval_keywords(pred, label):
pred_keywords = get_keywords(pred)
label_keywords = get_keywords(label)
pred_total = len(pred_keywords)
label_total = len(label_keywords)
cnt = 0
for k in pred_keywords:
if k in label_keywords:
cnt += 1
return label_total, pred_total, cnt
def count_agg(units):
return len([unit for unit in units if has_agg(unit)])
def count_component1(sql):
count = 0
if len(sql['where']) > 0:
count += 1
if len(sql['groupBy']) > 0:
count += 1
if len(sql['orderBy']) > 0:
count += 1
if sql['limit'] is not None:
count += 1
if len(sql['from']['table_units']) > 0: # JOIN
count += len(sql['from']['table_units']) - 1
ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
count += len([token for token in ao if token == 'or'])
cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
count += len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like')])
return count
def count_component2(sql):
nested = get_nestedSQL(sql)
return len(nested)
def count_others(sql):
count = 0
# number of aggregation
agg_count = count_agg(sql['select'][1])
agg_count += count_agg(sql['where'][::2])
agg_count += count_agg(sql['groupBy'])
if len(sql['orderBy']) > 0:
agg_count += count_agg([unit[1] for unit in sql['orderBy'][1] if unit[1]] +
[unit[2] for unit in sql['orderBy'][1] if unit[2]])
agg_count += count_agg(sql['having'])
if agg_count > 1:
count += 1
# number of select columns
if len(sql['select'][1]) > 1:
count += 1
# number of where conditions
if len(sql['where']) > 1:
count += 1
# number of group by clauses
if len(sql['groupBy']) > 1:
count += 1
return count
class Evaluator:
"""A simple evaluator"""
def __init__(self):
self.partial_scores = None
def eval_hardness(self, sql):
count_comp1_ = count_component1(sql)
count_comp2_ = count_component2(sql)
count_others_ = count_others(sql)
if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:
return "easy"
elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \
(count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0):
return "medium"
elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \
(2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \
(count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1):
return "hard"
else:
return "extra"
def eval_exact_match(self, pred, label):
partial_scores = self.eval_partial_match(pred, label)
self.partial_scores = partial_scores
for _, score in partial_scores.items():
if score['f1'] != 1:
return 0
if len(label['from']['table_units']) > 0:
label_tables = sorted(label['from']['table_units'])
pred_tables = sorted(pred['from']['table_units'])
return label_tables == pred_tables
return 1
def eval_partial_match(self, pred, label):
res = {}
label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt = eval_group(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt = eval_having(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt = eval_order(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt = eval_and_or(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt = eval_IUEN(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt = eval_keywords(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
return res
def isValidSQL(sql, db):
conn = sqlite3.connect(db)
cursor = conn.cursor()
try:
cursor.execute(sql)
except:
return False
return True
def print_scores(scores, etype):
levels = ['easy', 'medium', 'hard', 'extra', 'all']
partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
'group', 'order', 'and/or', 'IUEN', 'keywords']
print("{:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels))
counts = [scores[level]['count'] for level in levels]
print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
if etype in ["all", "exec"]:
print('===================== EXECUTION ACCURACY =====================')
this_scores = [scores[level]['exec'] for level in levels]
print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))
if etype in ["all", "match"]:
print('\n====================== EXACT MATCHING ACCURACY =====================')
exact_scores = [scores[level]['exact'] for level in levels]
print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
for type_ in partial_types:
this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
print('---------------------- PARTIAL MATCHING RECALL ----------------------')
for type_ in partial_types:
this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
print('---------------------- PARTIAL MATCHING F1 --------------------------')
for type_ in partial_types:
this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
def evaluate(gold, predict, db_dir, etype, kmaps):
with open(gold) as f:
glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
with open(predict) as f:
plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
# plist = [("select max(Share),min(Share) from performance where Type != 'terminal'", "orchestra")]
# glist = [("SELECT max(SHARE) , min(SHARE) FROM performance WHERE TYPE != 'Live final'", "orchestra")]
evaluator = Evaluator()
levels = ['easy', 'medium', 'hard', 'extra', 'all']
partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
'group', 'order', 'and/or', 'IUEN', 'keywords']
entries = []
scores = {}
for level in levels:
scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
scores[level]['exec'] = 0
for type_ in partial_types:
scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}
eval_err_num = 0
for p, g in zip(plist, glist):
p_str = p[0]
g_str, db = g
db_name = db
db = os.path.join(db_dir, db, db + ".sqlite")
schema = Schema(get_schema(db))
g_sql = get_sql(schema, g_str)
hardness = evaluator.eval_hardness(g_sql)
scores[hardness]['count'] += 1
scores['all']['count'] += 1
try:
p_sql = get_sql(schema, p_str)
except:
# If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
p_sql = {
"except": None,
"from": {
"conds": [],
"table_units": []
},
"groupBy": [],
"having": [],
"intersect": None,
"limit": None,
"orderBy": [],
"select": [
False,
[]
],
"union": None,
"where": []
}
eval_err_num += 1
print("eval_err_num:{}".format(eval_err_num))
# rebuild sql for value evaluation
kmap = kmaps[db_name]
g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
g_sql = rebuild_sql_val(g_sql)
g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
p_sql = rebuild_sql_val(p_sql)
p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
if etype in ["all", "exec"]:
exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
if exec_score:
scores[hardness]['exec'] += 1.0
scores['all']['exec'] += 1.0
if etype in ["all", "match"]:
exact_score = evaluator.eval_exact_match(p_sql, g_sql)
partial_scores = evaluator.partial_scores
if exact_score == 0:
print("{} pred: {}".format(hardness,p_str))
print("{} gold: {}".format(hardness,g_str))
print("")
scores[hardness]['exact'] += exact_score
scores['all']['exact'] += exact_score
for type_ in partial_types:
if partial_scores[type_]['pred_total'] > 0:
scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
scores[hardness]['partial'][type_]['acc_count'] += 1
if partial_scores[type_]['label_total'] > 0:
scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
scores[hardness]['partial'][type_]['rec_count'] += 1
scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
if partial_scores[type_]['pred_total'] > 0:
scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
scores['all']['partial'][type_]['acc_count'] += 1
if partial_scores[type_]['label_total'] > 0:
scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
scores['all']['partial'][type_]['rec_count'] += 1
scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
entries.append({
'predictSQL': p_str,
'goldSQL': g_str,
'hardness': hardness,
'exact': exact_score,
'partial': partial_scores
})
for level in levels:
if scores[level]['count'] == 0:
continue
if etype in ["all", "exec"]:
scores[level]['exec'] /= scores[level]['count']
if etype in ["all", "match"]:
scores[level]['exact'] /= scores[level]['count']
for type_ in partial_types:
if scores[level]['partial'][type_]['acc_count'] == 0:
scores[level]['partial'][type_]['acc'] = 0
else:
scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
scores[level]['partial'][type_]['acc_count'] * 1.0
if scores[level]['partial'][type_]['rec_count'] == 0:
scores[level]['partial'][type_]['rec'] = 0
else:
scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
scores[level]['partial'][type_]['rec_count'] * 1.0
if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
scores[level]['partial'][type_]['f1'] = 1
else:
scores[level]['partial'][type_]['f1'] = \
2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
print_scores(scores, etype)
def eval_exec_match(db, p_str, g_str, pred, gold):
"""
Return True if the values of the prediction and gold query match at the
corresponding indices. Currently does not support multiple col_unit pairs.
"""
conn = sqlite3.connect(db)
cursor = conn.cursor()
try:
cursor.execute(p_str)
p_res = cursor.fetchall()
except:
return False
cursor.execute(g_str)
q_res = cursor.fetchall()
def res_map(res, val_units):
rmap = {}
for idx, val_unit in enumerate(val_units):
key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
rmap[key] = [r[idx] for r in res]
return rmap
p_val_units = [unit[1] for unit in pred['select'][1]]
q_val_units = [unit[1] for unit in gold['select'][1]]
return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
# Rebuild SQL functions for value evaluation
def rebuild_cond_unit_val(cond_unit):
if cond_unit is None or not DISABLE_VALUE:
return cond_unit
not_op, op_id, val_unit, val1, val2 = cond_unit
if type(val1) is not dict:
val1 = None
else:
val1 = rebuild_sql_val(val1)
if type(val2) is not dict:
val2 = None
else:
val2 = rebuild_sql_val(val2)
return not_op, op_id, val_unit, val1, val2
def rebuild_condition_val(condition):
if condition is None or not DISABLE_VALUE:
return condition
res = []
for idx, it in enumerate(condition):
if idx % 2 == 0:
res.append(rebuild_cond_unit_val(it))
else:
res.append(it)
return res
def rebuild_sql_val(sql):
if sql is None or not DISABLE_VALUE:
return sql
sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
sql['having'] = rebuild_condition_val(sql['having'])
sql['where'] = rebuild_condition_val(sql['where'])
sql['intersect'] = rebuild_sql_val(sql['intersect'])
sql['except'] = rebuild_sql_val(sql['except'])
sql['union'] = rebuild_sql_val(sql['union'])
return sql
# Rebuild SQL functions for foreign key evaluation
def build_valid_col_units(table_units, schema):
col_ids = [table_unit[1] for table_unit in table_units if table_unit[0] == TABLE_TYPE['table_unit']]
prefixs = [col_id[:-2] for col_id in col_ids]
valid_col_units= []
for value in schema.idMap.values():
if '.' in value and value[:value.index('.')] in prefixs:
valid_col_units.append(value)
return valid_col_units
def rebuild_col_unit_col(valid_col_units, col_unit, kmap):
if col_unit is None:
return col_unit
agg_id, col_id, distinct = col_unit
if col_id in kmap and col_id in valid_col_units:
col_id = kmap[col_id]
if DISABLE_DISTINCT:
distinct = None
return agg_id, col_id, distinct
def rebuild_val_unit_col(valid_col_units, val_unit, kmap):
if val_unit is None:
return val_unit
unit_op, col_unit1, col_unit2 = val_unit
col_unit1 = rebuild_col_unit_col(valid_col_units, col_unit1, kmap)
col_unit2 = rebuild_col_unit_col(valid_col_units, col_unit2, kmap)
return unit_op, col_unit1, col_unit2
def rebuild_table_unit_col(valid_col_units, table_unit, kmap):
if table_unit is None:
return table_unit
table_type, col_unit_or_sql = table_unit
if isinstance(col_unit_or_sql, tuple):
col_unit_or_sql = rebuild_col_unit_col(valid_col_units, col_unit_or_sql, kmap)
return table_type, col_unit_or_sql
def rebuild_cond_unit_col(valid_col_units, cond_unit, kmap):
if cond_unit is None:
return cond_unit
not_op, op_id, val_unit, val1, val2 = cond_unit
val_unit = rebuild_val_unit_col(valid_col_units, val_unit, kmap)
return not_op, op_id, val_unit, val1, val2
def rebuild_condition_col(valid_col_units, condition, kmap):
for idx in range(len(condition)):
if idx % 2 == 0:
condition[idx] = rebuild_cond_unit_col(valid_col_units, condition[idx], kmap)
return condition
def rebuild_select_col(valid_col_units, sel, kmap):
if sel is None:
return sel
distinct, _list = sel
new_list = []
for it in _list:
agg_id, val_unit = it
new_list.append((agg_id, rebuild_val_unit_col(valid_col_units, val_unit, kmap)))
if DISABLE_DISTINCT:
distinct = None
return distinct, new_list
def rebuild_from_col(valid_col_units, from_, kmap):
if from_ is None:
return from_
from_['table_units'] = [rebuild_table_unit_col(valid_col_units, table_unit, kmap) for table_unit in from_['table_units']]
from_['conds'] = rebuild_condition_col(valid_col_units, from_['conds'], kmap)
return from_
def rebuild_group_by_col(valid_col_units, group_by, kmap):
if group_by is None:
return group_by
return [rebuild_col_unit_col(valid_col_units, col_unit, kmap) for col_unit in group_by]
def rebuild_order_by_col(valid_col_units, order_by, kmap):
if order_by is None or len(order_by) == 0:
return order_by
direction, val_units = order_by
new_val_units = [rebuild_val_unit_col(valid_col_units, val_unit, kmap) for val_unit in val_units]
return direction, new_val_units
def rebuild_sql_col(valid_col_units, sql, kmap):
if sql is None:
return sql
sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap)
sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap)
sql['intersect'] = rebuild_sql_col(valid_col_units, sql['intersect'], kmap)
sql['except'] = rebuild_sql_col(valid_col_units, sql['except'], kmap)
sql['union'] = rebuild_sql_col(valid_col_units, sql['union'], kmap)
return sql
def build_foreign_key_map(entry):
cols_orig = entry["column_names_original"]
tables_orig = entry["table_names_original"]
# rebuild cols corresponding to idmap in Schema
cols = []
for col_orig in cols_orig:
if col_orig[0] >= 0:
t = tables_orig[col_orig[0]]
c = col_orig[1]
cols.append("__" + t.lower() + "." + c.lower() + "__")
else:
cols.append("__all__")
def keyset_in_list(k1, k2, k_list):
for k_set in k_list:
if k1 in k_set or k2 in k_set:
return k_set
new_k_set = set()
k_list.append(new_k_set)
return new_k_set
foreign_key_list = []
foreign_keys = entry["foreign_keys"]
for fkey in foreign_keys:
key1, key2 = fkey
key_set = keyset_in_list(key1, key2, foreign_key_list)
key_set.add(key1)
key_set.add(key2)
foreign_key_map = {}
for key_set in foreign_key_list:
sorted_list = sorted(list(key_set))
midx = sorted_list[0]
for idx in sorted_list:
foreign_key_map[cols[idx]] = cols[midx]
return foreign_key_map
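# Illustrative sketch with a hypothetical schema (not from the original script):
# for tables ["Singer", "Concert"], column_names_original
# [[-1, "*"], [0, "Singer_ID"], [1, "Singer_ID"]] and foreign_keys [[2, 1]],
# the returned map would be
#   {'__singer.singer_id__': '__singer.singer_id__',
#    '__concert.singer_id__': '__singer.singer_id__'},
# i.e. foreign-key-linked columns collapse onto the column with the smallest index.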
def build_foreign_key_map_from_json(table):
with open(table) as f:
data = json.load(f)
tables = {}
for entry in data:
tables[entry['db_id']] = build_foreign_key_map(entry)
return tables
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--gold', dest='gold', type=str)
parser.add_argument('--pred', dest='pred', type=str)
parser.add_argument('--db', dest='db', type=str)
parser.add_argument('--table', dest='table', type=str)
parser.add_argument('--etype', dest='etype', type=str)
args = parser.parse_args()
gold = args.gold
pred = args.pred
db_dir = args.db
table = args.table
etype = args.etype
assert etype in ["all", "exec", "match"], "Unknown evaluation method"
kmaps = build_foreign_key_map_from_json(table)
evaluate(gold, pred, db_dir, etype, kmaps)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/evaluation.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Replace T5 SPM OOV character with `<`.
Certain punctuation characters are mapped to the OOV symbol in T5's
sentence-piece model. For Spider, this appears to only affect the `<` symbol,
so it can be deterministically recovered by running this script.
An alternative is to preprocess the dataset to avoid OOV symbols for T5.
"""
from absl import app
from absl import flags
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input txt file.")
flags.DEFINE_string("output", "", "Output txt file.")
def main(unused_argv):
with open(FLAGS.output, "w") as output_file:
with open(FLAGS.input, "r") as input_file:
for line in input_file:
pred = line.replace(" ⁇ ", "<").replace("<pad>", "").replace("</s>", "").replace("<unk>", "<")
if line != pred:
print("Original: %s" % line)
print("New: %s" % pred)
output_file.write("%s" % pred)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/restore_oov.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write Spider dataset in TSV format."""
import json
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tasks.spider import database_constants
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("examples", "", "Path to Spider json examples.")
flags.DEFINE_string("output", "", "Output tsv file.")
flags.DEFINE_bool(
"filter_by_database", True,
"Whether to only select examples for databases used for the Spider-SSP"
"setting proposed in the paper. Should be False to follow the standard"
"Spider-XSP setting.")
def normalize_whitespace(source):
tokens = source.split()
return " ".join(tokens)
def load_json(filepath):
with gfile.GFile(filepath, "r") as reader:
text = reader.read()
return json.loads(text)
def main(unused_argv):
examples_json = load_json(FLAGS.examples)
examples = []
for example_json in examples_json:
database = example_json["db_id"]
source = example_json["question"]
target = example_json["query"]
# Optionally skip if database not in set of databases with >= 50 examples.
if (FLAGS.filter_by_database and
database not in database_constants.DATABASES):
continue
# Prepend database.
source = "%s: %s" % (database, source)
target = normalize_whitespace(target)
examples.append((source.lower(), target.lower()))
tsv_utils.write_tsv(examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/write_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
################################
# Assumptions:
# 1. sql is correct
# 2. only table name has alias
# 3. only one intersect/union/except
#
# val: number(float)/string(str)/sql(dict)
# col_unit: (agg_id, col_id, isDistinct(bool))
# val_unit: (unit_op, col_unit1, col_unit2)
# table_unit: (table_type, col_unit/sql)
# cond_unit: (not_op, op_id, val_unit, val1, val2)
# condition: [cond_unit1, 'and'/'or', cond_unit2, ...]
# sql {
# 'select': (isDistinct(bool), [(agg_id, val_unit), (agg_id, val_unit), ...])
# 'from': {'table_units': [table_unit1, table_unit2, ...], 'conds': condition}
# 'where': condition
# 'groupBy': [col_unit1, col_unit2, ...]
# 'orderBy': ('asc'/'desc', [val_unit1, val_unit2, ...])
# 'having': condition
# 'limit': None/limit value
# 'intersect': None/sql
# 'except': None/sql
# 'union': None/sql
# }
################################
import json
import sqlite3
from nltk import word_tokenize
CLAUSE_KEYWORDS = ('select', 'from', 'where', 'group', 'order', 'limit', 'intersect', 'union', 'except')
JOIN_KEYWORDS = ('join', 'on', 'as')
WHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')
UNIT_OPS = ('none', '-', '+', "*", '/')
AGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')
TABLE_TYPE = {
'sql': "sql",
'table_unit': "table_unit",
}
COND_OPS = ('and', 'or')
SQL_OPS = ('intersect', 'union', 'except')
ORDER_OPS = ('desc', 'asc')
class Schema:
"""
Simple schema which maps table&column to a unique identifier
"""
def __init__(self, schema):
self._schema = schema
self._idMap = self._map(self._schema)
@property
def schema(self):
return self._schema
@property
def idMap(self):
return self._idMap
def _map(self, schema):
idMap = {'*': "__all__"}
id = 1
for key, vals in schema.items():
for val in vals:
idMap[key.lower() + "." + val.lower()] = "__" + key.lower() + "." + val.lower() + "__"
id += 1
for key in schema:
idMap[key.lower()] = "__" + key.lower() + "__"
id += 1
return idMap
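# Illustrative example (added comment, hypothetical schema): for
# schema = {'singer': ['name', 'age']} the resulting idMap would be
# {'*': '__all__', 'singer.name': '__singer.name__',
#  'singer.age': '__singer.age__', 'singer': '__singer__'}.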
def get_schema(db):
"""
Get database's schema, which is a dict with table name as key
and list of column names as value
:param db: database path
:return: schema dict
"""
schema = {}
conn = sqlite3.connect(db)
cursor = conn.cursor()
# fetch table names
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = [str(table[0].lower()) for table in cursor.fetchall()]
# fetch table info
for table in tables:
cursor.execute("PRAGMA table_info({})".format(table))
schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
return schema
def get_schema_from_json(fpath):
with open(fpath) as f:
data = json.load(f)
schema = {}
for entry in data:
table = str(entry['table'].lower())
cols = [str(col['column_name'].lower()) for col in entry['col_data']]
schema[table] = cols
return schema
def tokenize(string):
string = str(string)
string = string.replace("\'", "\"")  # normalize quotes so all string values are wrapped in double quotes
quote_idxs = [idx for idx, char in enumerate(string) if char == '"']
assert len(quote_idxs) % 2 == 0, "Unexpected quote"
# keep string value as token
vals = {}
for i in range(len(quote_idxs)-1, -1, -2):
qidx1 = quote_idxs[i-1]
qidx2 = quote_idxs[i]
val = string[qidx1: qidx2+1]
key = "__val_{}_{}__".format(qidx1, qidx2)
string = string[:qidx1] + key + string[qidx2+1:]
vals[key] = val
toks = [word.lower() for word in word_tokenize(string)]
# replace with string value token
for i in range(len(toks)):
if toks[i] in vals:
toks[i] = vals[toks[i]]
# find if there exists !=, >=, <=
eq_idxs = [idx for idx, tok in enumerate(toks) if tok == "="]
eq_idxs.reverse()
prefix = ('!', '>', '<')
for eq_idx in eq_idxs:
pre_tok = toks[eq_idx-1]
if pre_tok in prefix:
toks = toks[:eq_idx-1] + [pre_tok + "="] + toks[eq_idx+1: ]
return toks
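# Illustrative example, not from the original file:
#   tokenize('SELECT name FROM singer WHERE country = "USA"')
# would return ['select', 'name', 'from', 'singer', 'where', 'country', '=', '"USA"'],
# i.e. keywords are lowercased and the quoted value is kept as a single token.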
def scan_alias(toks):
"""Scan the index of 'as' and build the map for all alias"""
as_idxs = [idx for idx, tok in enumerate(toks) if tok == 'as']
alias = {}
for idx in as_idxs:
alias[toks[idx+1]] = toks[idx-1]
return alias
def get_tables_with_alias(schema, toks):
tables = scan_alias(toks)
for key in schema:
assert key not in tables, "Alias {} has the same name as a table".format(key)
tables[key] = key
return tables
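# Illustrative example (added comment): for the tokens of
# "select t1.name from singer as t1" and a schema containing a "singer" table,
# get_tables_with_alias would return {'t1': 'singer', 'singer': 'singer'}
# (plus a self-mapping for every other table in the schema).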
def parse_col(toks, start_idx, tables_with_alias, schema, default_tables=None):
"""
:returns next idx, column id
"""
tok = toks[start_idx]
if tok == "*":
return start_idx + 1, schema.idMap[tok]
if '.' in tok: # if token is a composite
alias, col = tok.split('.')
key = tables_with_alias[alias] + "." + col
return start_idx+1, schema.idMap[key]
assert default_tables is not None and len(default_tables) > 0, "Default tables should not be None or empty"
for alias in default_tables:
table = tables_with_alias[alias]
if tok in schema.schema[table]:
key = table + "." + tok
return start_idx+1, schema.idMap[key]
assert False, "Error col: {}".format(tok)
def parse_col_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
"""
:returns next idx, (agg_op id, col_id)
"""
idx = start_idx
len_ = len(toks)
isBlock = False
isDistinct = False
if toks[idx] == '(':
isBlock = True
idx += 1
if toks[idx] in AGG_OPS:
agg_id = AGG_OPS.index(toks[idx])
idx += 1
assert idx < len_ and toks[idx] == '('
idx += 1
if toks[idx] == "distinct":
idx += 1
isDistinct = True
idx, col_id = parse_col(toks, idx, tables_with_alias, schema, default_tables)
assert idx < len_ and toks[idx] == ')'
idx += 1
return idx, (agg_id, col_id, isDistinct)
if toks[idx] == "distinct":
idx += 1
isDistinct = True
agg_id = AGG_OPS.index("none")
idx, col_id = parse_col(toks, idx, tables_with_alias, schema, default_tables)
if isBlock:
assert toks[idx] == ')'
idx += 1 # skip ')'
return idx, (agg_id, col_id, isDistinct)
def parse_val_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
isBlock = False
if toks[idx] == '(':
isBlock = True
idx += 1
col_unit1 = None
col_unit2 = None
unit_op = UNIT_OPS.index('none')
idx, col_unit1 = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
if idx < len_ and toks[idx] in UNIT_OPS:
unit_op = UNIT_OPS.index(toks[idx])
idx += 1
idx, col_unit2 = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
if isBlock:
assert toks[idx] == ')'
idx += 1 # skip ')'
return idx, (unit_op, col_unit1, col_unit2)
def parse_table_unit(toks, start_idx, tables_with_alias, schema):
"""
:returns next idx, table id, table name
"""
idx = start_idx
len_ = len(toks)
key = tables_with_alias[toks[idx]]
if idx + 1 < len_ and toks[idx+1] == "as":
idx += 3
else:
idx += 1
return idx, schema.idMap[key], key
def parse_value(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
isBlock = False
if toks[idx] == '(':
isBlock = True
idx += 1
if toks[idx] == 'select':
idx, val = parse_sql(toks, idx, tables_with_alias, schema)
elif "\"" in toks[idx]: # token is a string value
val = toks[idx]
idx += 1
else:
try:
val = float(toks[idx])
idx += 1
except:
end_idx = idx
while end_idx < len_ and toks[end_idx] != ',' and toks[end_idx] != ')'\
and toks[end_idx] != 'and' and toks[end_idx] not in CLAUSE_KEYWORDS and toks[end_idx] not in JOIN_KEYWORDS:
end_idx += 1
idx, val = parse_col_unit(toks[start_idx: end_idx], 0, tables_with_alias, schema, default_tables)
idx = end_idx
if isBlock:
assert toks[idx] == ')'
idx += 1
return idx, val
def parse_condition(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
conds = []
while idx < len_:
idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
not_op = False
if toks[idx] == 'not':
not_op = True
idx += 1
assert idx < len_ and toks[idx] in WHERE_OPS, "Error condition: idx: {}, tok: {}".format(idx, toks[idx])
op_id = WHERE_OPS.index(toks[idx])
idx += 1
val1 = val2 = None
if op_id == WHERE_OPS.index('between'): # between..and... special case: dual values
idx, val1 = parse_value(toks, idx, tables_with_alias, schema, default_tables)
assert toks[idx] == 'and'
idx += 1
idx, val2 = parse_value(toks, idx, tables_with_alias, schema, default_tables)
else: # normal case: single value
idx, val1 = parse_value(toks, idx, tables_with_alias, schema, default_tables)
val2 = None
conds.append((not_op, op_id, val_unit, val1, val2))
if idx < len_ and (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";") or toks[idx] in JOIN_KEYWORDS):
break
if idx < len_ and toks[idx] in COND_OPS:
conds.append(toks[idx])
idx += 1 # skip and/or
return idx, conds
def parse_select(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
assert toks[idx] == 'select', "'select' not found"
idx += 1
isDistinct = False
if idx < len_ and toks[idx] == 'distinct':
idx += 1
isDistinct = True
val_units = []
while idx < len_ and toks[idx] not in CLAUSE_KEYWORDS:
agg_id = AGG_OPS.index("none")
if toks[idx] in AGG_OPS:
agg_id = AGG_OPS.index(toks[idx])
idx += 1
idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
val_units.append((agg_id, val_unit))
if idx < len_ and toks[idx] == ',':
idx += 1 # skip ','
return idx, (isDistinct, val_units)
def parse_from(toks, start_idx, tables_with_alias, schema):
"""
Assume in the from clause, all table units are combined with join
"""
assert 'from' in toks[start_idx:], "'from' not found"
len_ = len(toks)
idx = toks.index('from', start_idx) + 1
default_tables = []
table_units = []
conds = []
while idx < len_:
isBlock = False
if toks[idx] == '(':
isBlock = True
idx += 1
if toks[idx] == 'select':
idx, sql = parse_sql(toks, idx, tables_with_alias, schema)
table_units.append((TABLE_TYPE['sql'], sql))
else:
if idx < len_ and toks[idx] == 'join':
idx += 1 # skip join
idx, table_unit, table_name = parse_table_unit(toks, idx, tables_with_alias, schema)
table_units.append((TABLE_TYPE['table_unit'],table_unit))
default_tables.append(table_name)
if idx < len_ and toks[idx] == "on":
idx += 1 # skip on
idx, this_conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
if len(conds) > 0:
conds.append('and')
conds.extend(this_conds)
if isBlock:
assert toks[idx] == ')'
idx += 1
if idx < len_ and (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";")):
break
return idx, table_units, conds, default_tables
def parse_where(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
if idx >= len_ or toks[idx] != 'where':
return idx, []
idx += 1
idx, conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
return idx, conds
def parse_group_by(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
col_units = []
if idx >= len_ or toks[idx] != 'group':
return idx, col_units
idx += 1
assert toks[idx] == 'by'
idx += 1
while idx < len_ and not (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";")):
idx, col_unit = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
col_units.append(col_unit)
if idx < len_ and toks[idx] == ',':
idx += 1 # skip ','
else:
break
return idx, col_units
def parse_order_by(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
val_units = []
order_type = 'asc' # default type is 'asc'
if idx >= len_ or toks[idx] != 'order':
return idx, val_units
idx += 1
assert toks[idx] == 'by'
idx += 1
while idx < len_ and not (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";")):
idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
val_units.append(val_unit)
if idx < len_ and toks[idx] in ORDER_OPS:
order_type = toks[idx]
idx += 1
if idx < len_ and toks[idx] == ',':
idx += 1 # skip ','
else:
break
return idx, (order_type, val_units)
def parse_having(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
if idx >= len_ or toks[idx] != 'having':
return idx, []
idx += 1
idx, conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
return idx, conds
def parse_limit(toks, start_idx):
idx = start_idx
len_ = len(toks)
if idx < len_ and toks[idx] == 'limit':
idx += 2
return idx, int(toks[idx-1])
return idx, None
def parse_sql(toks, start_idx, tables_with_alias, schema):
isBlock = False # indicate whether this is a block of sql/sub-sql
len_ = len(toks)
idx = start_idx
sql = {}
if toks[idx] == '(':
isBlock = True
idx += 1
# parse from clause in order to get default tables
from_end_idx, table_units, conds, default_tables = parse_from(toks, start_idx, tables_with_alias, schema)
sql['from'] = {'table_units': table_units, 'conds': conds}
# select clause
_, select_col_units = parse_select(toks, idx, tables_with_alias, schema, default_tables)
idx = from_end_idx
sql['select'] = select_col_units
# where clause
idx, where_conds = parse_where(toks, idx, tables_with_alias, schema, default_tables)
sql['where'] = where_conds
# group by clause
idx, group_col_units = parse_group_by(toks, idx, tables_with_alias, schema, default_tables)
sql['groupBy'] = group_col_units
# having clause
idx, having_conds = parse_having(toks, idx, tables_with_alias, schema, default_tables)
sql['having'] = having_conds
# order by clause
idx, order_col_units = parse_order_by(toks, idx, tables_with_alias, schema, default_tables)
sql['orderBy'] = order_col_units
# limit clause
idx, limit_val = parse_limit(toks, idx)
sql['limit'] = limit_val
idx = skip_semicolon(toks, idx)
if isBlock:
assert toks[idx] == ')'
idx += 1 # skip ')'
idx = skip_semicolon(toks, idx)
# intersect/union/except clause
for op in SQL_OPS: # initialize IUE
sql[op] = None
if idx < len_ and toks[idx] in SQL_OPS:
sql_op = toks[idx]
idx += 1
idx, IUE_sql = parse_sql(toks, idx, tables_with_alias, schema)
sql[sql_op] = IUE_sql
return idx, sql
def load_data(fpath):
with open(fpath) as f:
data = json.load(f)
return data
def get_sql(schema, query):
toks = tokenize(query)
tables_with_alias = get_tables_with_alias(schema.schema, toks)
_, sql = parse_sql(toks, 0, tables_with_alias, schema)
return sql
def skip_semicolon(toks, start_idx):
idx = start_idx
while idx < len(toks) and toks[idx] == ";":
idx += 1
return idx
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/process_sql.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to preprocess Spider to accomondate tokenization of NQG.
NQG performs simple space-separated tokenization. The tokenization in this
module to accomondate this primarily involves splitting on punctuation, e.g.
`"foo"` becomes `" foo "`.
"""
import string
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks.spider import sql_tokenizer
def _split_punc(source):
"""Split leading or trailing punctuation."""
tokens = source.split(" ")
new_tokens = []
for token in tokens:
if all(char in string.punctuation for char in token):
new_tokens.append(token)
continue
leading_punc = None
for punc in string.punctuation:
if token.startswith(punc):
leading_punc = punc
token = token.lstrip(punc)
break
trailing_punc = None
for punc in string.punctuation:
if token.endswith(punc):
trailing_punc = punc
token = token.rstrip(punc)
break
if leading_punc:
new_tokens.append(leading_punc)
if token:
new_tokens.append(token)
if trailing_punc:
new_tokens.append(trailing_punc)
return " ".join(new_tokens)
def process_source(source):
source = _split_punc(source)
# Remove extra whitespace.
source = " ".join(source.split())
return source
def process_target(target):
"""Preprocess target for space-separated tokenization."""
target_sql_tokens = sql_tokenizer.tokenize_sql(target)
target = " ".join(target_sql_tokens)
target = _split_punc(target)
# Split punc twice, to handle "%foo%" wrapped in two punc chars.
# TODO(petershaw): Update _split_punc to correctly handle this case with
# a single invocation.
target = _split_punc(target)
# Remove extra whitespace.
target = " ".join(target.split())
return target
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/nqg_tokenization.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for tokenizing SQL."""
import sqlparse
def _is_whitespace(sqlparse_token):
return sqlparse_token.ttype == sqlparse.tokens.Whitespace
def tokenize_sql(sql_exp):
sql_exp = sql_exp.lower()
sql_exp = sql_exp.rstrip(";")
parse = sqlparse.parse(sql_exp)
sql = parse[0]
flat_tokens = sql.flatten()
sql_tokens = [
token.value for token in flat_tokens if not _is_whitespace(token)
]
return sql_tokens
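# Illustrative example, not from the original file: tokenize_sql("SELECT name FROM singer;")
# would return ['select', 'name', 'from', 'singer'], assuming sqlparse keeps each
# keyword and identifier as a single flattened token.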
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/sql_tokenizer.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Serialize and append database schema to inputs."""
import collections
import json
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string("output", "", "Output tsv file.")
flags.DEFINE_string("tables", "", "Spider tables JSON file.")
def load_json(filepath):
with gfile.GFile(filepath, "r") as reader:
text = reader.read()
return json.loads(text)
def _get_schema_string(table_json):
"""Returns the schema serialized as a string."""
table_id_to_column_names = collections.defaultdict(list)
for table_id, name in table_json["column_names_original"]:
table_id_to_column_names[table_id].append(name.lower())
tables = table_json["table_names_original"]
table_strings = []
for table_id, table_name in enumerate(tables):
column_names = table_id_to_column_names[table_id]
table_string = " | %s : %s" % (table_name.lower(), " , ".join(column_names))
table_strings.append(table_string)
return "".join(table_strings)
def main(unused_argv):
tables_json = load_json(FLAGS.tables)
db_id_to_schema_string = {}
for table_json in tables_json:
db_id = table_json["db_id"].lower()
db_id_to_schema_string[db_id] = _get_schema_string(table_json)
examples = tsv_utils.read_tsv(FLAGS.input)
new_examples = []
for source, target in examples:
db_id = source.split()[0].rstrip(":")
schema_string = db_id_to_schema_string[db_id]
new_source = "%s%s" % (source, schema_string)
new_examples.append((new_source.lower(), target.lower()))
tsv_utils.write_tsv(new_examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/append_schema.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate gold targets with database ID for Spider evaluation."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tasks.spider import database_constants
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "",
"Input tsv file (e.g. output of split_dataset.py).")
flags.DEFINE_string("output", "", "Output txt file.")
def main(unused_argv):
formatted_db_id_to_db_id = {}
for db_id in database_constants.DATABASES:
formatted_db_id_to_db_id[db_id.lower()] = db_id
formatted_db_id_to_db_id[db_id] = db_id
examples = tsv_utils.read_tsv(FLAGS.input)
with gfile.GFile(FLAGS.output, "w") as txt_file:
for example in examples:
db_id = example[0].split()[0].rstrip(":")
db_id = formatted_db_id_to_db_id[db_id]
txt_file.write("%s\t%s\n" % (example[1], db_id))
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/generate_gold.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants related to Spider databases."""
# Databases with >= 50 examples.
DATABASES = [
"student_assessment",
"bike_1",
"flight_1",
"allergy_1",
"store_1",
"customers_card_transactions",
"chinook_1",
"match_season",
"apartment_rentals",
"college_2",
"customers_and_invoices",
"small_bank_1",
"formula_1",
"csu_1",
"movie_1",
"inn_1",
"election",
"icfp_1",
"sakila_1",
"loan_1",
"college_1",
"sports_competition",
"hr_1",
"music_1",
"baseball_1",
"e_learning",
"hospital_1",
"student_1",
"cre_Doc_Tracking_DB",
"club_1",
"tracking_grants_for_research",
"network_2",
"college_3",
"department_store",
"soccer_2",
"cre_Drama_Workshop_Groups",
"music_2",
"manufactory_1",
"voter_2",
"products_gen_characteristics",
"dorm_1",
"cre_Theme_park",
"game_1",
"customers_and_addresses",
"music_4",
"cre_Docs_and_Epenses",
"wine_1",
"driving_school",
"activity_1",
"flight_4",
"tracking_orders",
]
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/database_constants.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for extracting entities from geobase file.
geobase file is available at:
http://www.cs.utexas.edu/users/ml/nldata/geoquery.html
"""
import collections
from tensorflow.io import gfile
GeoEntity = collections.namedtuple(
"GeoEntity",
[
"aliases", # List of Strings.
"attribute", # String.
"identifier", # String.
])
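# Illustrative example (added comment): _add_state_constant("texas", ...) below would
# produce GeoEntity(identifier="stateid('texas')", attribute="stateid", aliases=["texas"]).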
def _add_underspecified_city_constant(city_name, identifiers_to_entities):
identifier = "cityid('%s',_)" % city_name
if identifier in identifiers_to_entities:
return
identifiers_to_entities[identifier] = GeoEntity(
identifier=identifier, attribute="cityid", aliases=[city_name])
def _add_city_constants(city_name, state_name, state_abbreviation,
identifiers_to_entities):
"""Add constants for fully and under-specified city."""
_add_underspecified_city_constant(city_name, identifiers_to_entities)
identifier = "cityid('%s','%s')" % (city_name, state_abbreviation)
if identifier in identifiers_to_entities:
return
aliases = [
"%s %s" % (city_name, state_name),
"%s %s" % (city_name, state_abbreviation),
]
identifiers_to_entities[identifier] = GeoEntity(
identifier=identifier, attribute="cityid", aliases=aliases)
def _add_state_constant(name, identifiers_to_entities):
identifier = "stateid('%s')" % name
if identifier in identifiers_to_entities:
return
identifiers_to_entities[identifier] = GeoEntity(
identifier=identifier, attribute="stateid", aliases=[name])
def _add_river_constant(name, identifiers_to_entities):
"""Add entities for rivers."""
identifier = "riverid('%s')" % name
if identifier in identifiers_to_entities:
return
identifiers_to_entities[identifier] = GeoEntity(
identifier=identifier, attribute="riverid", aliases=[name])
def _add_place_constant(name, identifiers_to_entities):
identifier = "placeid('%s')" % name
if identifier in identifiers_to_entities:
return
identifiers_to_entities[identifier] = GeoEntity(
identifier=identifier, attribute="placeid", aliases=[name])
def _add_usa(identifiers_to_entities):
"""Add constant for usa."""
# Only one `country` predicate appears in geobase:
# country('usa',307890000,9826675)
# Special-case `usa` and add some known aliases.
identifier = "countryid(usa)"
aliases = [
"america",
"continental us",
"united states",
"us",
"usa",
"country",
]
identifiers_to_entities[identifier] = GeoEntity(
identifier=identifier, attribute="countryid", aliases=aliases)
def load_entities(geobase_file):
"""Returns list of GeoEntity tuples for geobase entities."""
# Identifier string to GeoEntity tuple.
identifiers_to_entities = {}
with gfile.GFile(geobase_file, "r") as inputfile:
for line in inputfile:
# line = line.decode("latin1")
if line.startswith("state"):
splits = line.split("'")
state_name = splits[1]
state_abbreviation = splits[3]
city_capital = splits[5]
city_1 = splits[7]
city_2 = splits[9]
city_3 = splits[11]
city_4 = splits[13]
_add_state_constant(state_name, identifiers_to_entities)
for city_name in [city_capital, city_1, city_2, city_3, city_4]:
_add_city_constants(city_name, state_name, state_abbreviation,
identifiers_to_entities)
elif line.startswith("city"):
state_name = line.split("'")[1]
state_abbreviation = line.split("'")[3]
city_name = line.split("'")[5]
_add_city_constants(city_name, state_name, state_abbreviation,
identifiers_to_entities)
elif line.startswith("river"):
river_name = line.split("'")[1]
_add_river_constant(river_name, identifiers_to_entities)
elif line.startswith("mountain"):
mountain_name = line.split("'")[5]
_add_place_constant(mountain_name, identifiers_to_entities)
elif line.startswith("highlow"):
lowpoint_name = line.split("'")[5]
highpoint_name = line.split("'")[7]
_add_place_constant(lowpoint_name, identifiers_to_entities)
_add_place_constant(highpoint_name, identifiers_to_entities)
# This city is not mentioned in geobase directly, but referenced by a query
# in the train set.
_add_city_constants("springfield", "south dakota", "sd",
identifiers_to_entities)
_add_usa(identifiers_to_entities)
return identifiers_to_entities.values()
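# Illustrative usage sketch of the helpers above: populate an identifier map
# for one state and one of its cities. The city/state values are arbitrary
# examples, not read from a geobase file.
if __name__ == "__main__":
  example_map = {}
  _add_state_constant("texas", example_map)
  _add_city_constants("austin", "texas", "tx", example_map)
  for example_identifier, example_entity in example_map.items():
    # Expect stateid('texas'), cityid('austin',_), and cityid('austin','tx').
    print(example_identifier, example_entity.aliases)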
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/geobase_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Split dataset tsv file based on target templates."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import template_utils
from tasks import tsv_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string(
"output_1", "",
"Output tsv file containing up to `max_num_examples_1` examples.")
flags.DEFINE_string("output_2", "",
"Output tsv file containing the remaining examples.")
flags.DEFINE_float("max_num_examples_1", 440,
"Maximum number of examples for output_1.")
flags.DEFINE_integer("seed", 1, "Seed for splitting examples.")
def funql_template_fn(target):
"""Simply returns target since entities are already anonymized in targets."""
return target
def main(unused_argv):
examples = tsv_utils.read_tsv(FLAGS.input)
examples_1, examples_2 = template_utils.split_by_template(
examples,
template_fn=funql_template_fn,
max_num_examples_1=FLAGS.max_num_examples_1,
seed=FLAGS.seed)
tsv_utils.write_tsv(examples_1, FLAGS.output_1)
tsv_utils.write_tsv(examples_2, FLAGS.output_2)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/gen_template_split.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for defining atoms and compounds for FunQL."""
import collections
# Placeholder symbol for compounds.
_PLACEHOLDER = "__"
def _split_arguments(args_string):
"""Splits comma-joined argument list.
For example, an input of "foo, bar(xyz, abc), bar" will be split
into: ["foo", "bar(xyz, abc)", "bar"].
Args:
args_string: String input for comma-separated argument list.
Returns:
List of Strings for each argument.
"""
argument_buffer = []
arguments = []
open_parens = 0
for char in args_string:
if char == "," and open_parens == 0:
arguments.append("".join(argument_buffer))
argument_buffer = []
elif char == " " and not argument_buffer:
continue
else:
if char == "(":
open_parens += 1
elif char == ")":
open_parens -= 1
argument_buffer.append(char)
arguments.append("".join(argument_buffer))
return arguments
def _get_name_and_arguments(funql):
"""Returns function name and argument sub-expressions."""
funql = funql.strip()
paren_index = funql.find("(")
if paren_index == -1:
return funql, None
name = funql[:paren_index].strip()
arguments = funql[paren_index + 1:].strip()
if arguments[-1] != ")":
raise ValueError("Invalid arguments string ends with %s: %s" %
(arguments[-1], arguments))
arguments = _split_arguments(arguments[:-1])
return name, arguments
def _get_compound_string(outer, outer_arity, inner, inner_idx):
arguments = [_PLACEHOLDER] * outer_arity
arguments[inner_idx] = inner
return "%s( %s )" % (outer, " , ".join(arguments))
def _get_compounds_inner(funql, compounds_to_counts):
"""Recursively add compound counts to compounds_to_counts."""
name, arguments = _get_name_and_arguments(funql)
if not arguments:
return
for argument_idx, argument in enumerate(arguments):
argument_name, _ = _get_name_and_arguments(argument)
compound = _get_compound_string(name, len(arguments), argument_name,
argument_idx)
compounds_to_counts[compound] += 1
_get_compounds_inner(argument, compounds_to_counts)
def get_compounds(target):
"""Use combinations of 2 atoms as compounds."""
compounds_to_count = collections.Counter()
_get_compounds_inner(target, compounds_to_count)
return compounds_to_count
def get_atoms(target):
"""Use individual tokens as atoms."""
atoms = set()
for token in target.split():
if token not in ("(", ")", ","):
atoms.add(token)
return atoms
def get_atoms_with_num_arguments(target):
"""Consider symbols and their number of arguments."""
name, arguments = _get_name_and_arguments(target)
if arguments:
atoms = set()
atoms.add("%s_(%s)" % (name, len(arguments)))
for argument in arguments:
atoms |= get_atoms_with_num_arguments(argument)
return atoms
else:
return {name}
def get_example_compounds(example):
return get_compounds(example[1])
def get_example_atoms(example):
return get_atoms(example[1])
def get_example_atoms_with_num_arguments(example):
return get_atoms_with_num_arguments(example[1])
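# Illustrative usage sketch. It assumes the target is a space-separated FunQL
# string, as produced elsewhere in this pipeline by
# funql_normalization.add_space_separation; the example target is made up.
if __name__ == "__main__":
  example_target = "answer ( largest ( state ) )"
  print(get_atoms(example_target))            # e.g. {'answer', 'largest', 'state'}
  print(get_compounds(example_target))        # counts answer( largest ) and largest( state )
  print(get_atoms_with_num_arguments(example_target))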
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/tmcd_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes % of examples in input_2 containing an atom not input_1."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import mcd_utils
from tasks import tsv_utils
from tasks.geoquery import tmcd_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("input_1", "", "Input tsv file.")
flags.DEFINE_string("input_2", "", "Input tsv file.")
def main(unused_argv):
examples_1 = tsv_utils.read_tsv(FLAGS.input_1)
examples_2 = tsv_utils.read_tsv(FLAGS.input_2)
atoms_1 = mcd_utils.get_all_atoms(
examples_1, get_atoms_fn=tmcd_utils.get_example_atoms)
num_examples = 0
num_examples_with_unseen_atom = 0
for example in examples_2:
atoms = tmcd_utils.get_example_atoms(example)
num_examples += 1
for atom in atoms:
if atom not in atoms_1:
print("New atom: %s" % atom)
num_examples_with_unseen_atom += 1
break
print("num_examples: %s" % num_examples)
print("num_examples_with_unseen_atom: %s" % num_examples_with_unseen_atom)
print("pct: %s" % (float(num_examples_with_unseen_atom) / num_examples))
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/measure_unseen_atoms.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for generating geoquery data."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tasks.geoquery import entity_utils
from tasks.geoquery import funql_normalization
from tasks.geoquery import geobase_utils
from tasks.geoquery import xml_file_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("corpus", "", "Path to geoquery xml file.")
flags.DEFINE_string("geobase", "", "Path to geobase file.")
flags.DEFINE_string("output", "", "Output dataset file.")
def get_examples():
"""Return list of example tuples."""
xml_examples = xml_file_utils.read_examples(FLAGS.corpus)
examples = []
geobase_entities = geobase_utils.load_entities(FLAGS.geobase)
for utterance, funql in xml_examples:
funql = funql_normalization.normalize_funql(funql)
funql, utterance, _ = entity_utils.replace_entities(funql, utterance,
geobase_entities)
funql = funql_normalization.add_space_separation(funql)
examples.append((utterance, funql))
return examples
def main(unused_argv):
examples = get_examples()
tsv_utils.write_tsv(examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/write_dataset.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reading XML datafile for GeoQuery."""
from xml.etree import ElementTree
from tensorflow.io import gfile
def process_utterance(utterance):
"""Lowercase and remove punctuation."""
return utterance.lower().rstrip("?").rstrip(".").rstrip().replace(" '", "")
def process_funql(funql):
"""Remove quotes and unnecessary spaces."""
funql = funql.replace("'", "")
funql = funql.replace(", ", ",")
funql = funql.replace(", ", ",")
funql = funql.replace(" ,", ",")
return funql
def load_xml_tree(corpus):
with gfile.GFile(corpus, "r") as xml_file:
return ElementTree.fromstring(xml_file.read())
def get_utterance(example_root):
for utterance in example_root.findall("nl"):
if utterance.attrib["lang"] == "en":
return process_utterance(utterance.text.strip())
raise ValueError("Could not find utterance.")
def get_funql(example_root):
for mrl in example_root.findall("mrl"):
if mrl.attrib["lang"] == "geo-funql":
return process_funql(mrl.text.strip())
raise ValueError("Could not find funql.")
def read_examples(corpus):
examples = []
root = load_xml_tree(corpus)
for example_root in root:
examples.append((get_utterance(example_root), get_funql(example_root)))
return examples
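# Illustrative usage sketch for the two normalization helpers above; the
# utterance and FunQL strings are made-up GeoQuery-style examples.
if __name__ == "__main__":
  print(process_utterance("What is the capital of Texas ?"))
  # -> what is the capital of texas
  print(process_funql("answer(capital_1(stateid('texas')))"))
  # -> answer(capital_1(stateid(texas)))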
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/xml_file_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Measures and prints compound divergence between two sets of examples."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import mcd_utils
from tasks import tsv_utils
from tasks.geoquery import tmcd_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("input_1", "", "Input tsv file.")
flags.DEFINE_string("input_2", "", "Input tsv file.")
def main(unused_argv):
examples_1 = tsv_utils.read_tsv(FLAGS.input_1)
examples_2 = tsv_utils.read_tsv(FLAGS.input_2)
divergence = mcd_utils.measure_example_divergence(
examples_1, examples_2, get_compounds_fn=tmcd_utils.get_example_compounds)
print("Compound divergence: %s" % divergence)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/measure_compound_divergence.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for replacing entities in FunQL."""
def _maybe_list_replace(lst, sublist, replacement):
"""Replace first occurrence of sublist in lst with replacement."""
new_list = []
idx = 0
replaced = False
while idx < len(lst):
if not replaced and lst[idx:idx + len(sublist)] == sublist:
new_list.append(replacement)
replaced = True
idx += len(sublist)
else:
new_list.append(lst[idx])
idx += 1
if not replaced:
return None
return new_list
def _maybe_replace_entity(funql, utterance, mention_map, geobase_entity):
"""Replace entity identifiers so that they can be generated using copy."""
# GeoQuery has <= 2 mentions per query.
mention_marker = "m1" if "m0" in utterance else "m0"
# Split utterance to avoid replacing some substring of a token.
tokens = utterance.split(" ")
# Order aliases by longest alias, since some can be nested in others.
aliases = sorted(geobase_entity.aliases, key=lambda x: -len(x))
for alias in aliases:
alias_tokens = alias.split(" ")
new_tokens = _maybe_list_replace(tokens, alias_tokens, mention_marker)
if new_tokens:
normalized_identifier = geobase_entity.identifier.replace("'", "")
new_funql = funql.replace(normalized_identifier, mention_marker)
new_utterance = " ".join(new_tokens)
mention_map[mention_marker] = geobase_entity.identifier
return new_funql, new_utterance, mention_map
# Could not find alias.
return funql, utterance, mention_map
def replace_entities(funql, utterance, geobase_entities):
"""Replace entity references with something more copy friendly."""
# Order entities by longest identifier, since some can be nested
# in others.
geobase_entities = sorted(geobase_entities, key=lambda x: -len(x.identifier))
mention_map = {}
for geobase_entity in geobase_entities:
normalized_identifier = geobase_entity.identifier.replace("'", "")
if normalized_identifier in funql:
funql, utterance, mention_map = _maybe_replace_entity(
funql, utterance, mention_map, geobase_entity)
return funql, utterance, mention_map
def restore_entities(funql, mention_map):
"""Restore entities in funql."""
for mention_mark, identifier in mention_map.items():
funql = funql.replace(mention_mark, "%s" % identifier)
return funql
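# Illustrative usage sketch. Any object exposing `identifier` and `aliases`
# attributes works here; in the actual pipeline the entities come from
# geobase_utils.load_entities. The example entity and strings are made up.
if __name__ == "__main__":
  import collections
  ExampleEntity = collections.namedtuple(
      "ExampleEntity", ["identifier", "attribute", "aliases"])
  texas = ExampleEntity(
      identifier="stateid('texas')", attribute="stateid", aliases=["texas"])
  new_funql, new_utterance, mention_map = replace_entities(
      "answer(capital_1(stateid(texas)))",
      "what is the capital of texas",
      [texas])
  print(new_funql)      # answer(capital_1(m0))
  print(new_utterance)  # what is the capital of m0
  print(restore_entities(new_funql, mention_map))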
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/entity_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for (reversible) normalization of FunQL.
FunQL is defined here:
https://www.cs.utexas.edu/~ml/wasp/geo-funql.html
We use the corresponding lambda term definitions to expand various functions
to a more intuitive form that better reflects the arity of the underlying
operations.
"""
RELATION_CONSTANTS = [
"area_1", "capital_1", "capital_2", "density_1", "elevation_1",
"elevation_2", "high_point_1", "high_point_2", "higher_2", "loc_1", "loc_2",
"low_point_1", "low_point_2", "lower_2", "next_to_1", "next_to_2",
"population_1", "traverse_1", "traverse_2", "longer", "len", "size"
]
# Can occur with `all` as argument.
UNARY_CONSTANTS = [
"capital", "city", "lake", "major", "mountain", "place", "river", "state"
]
ENTITY_FUNCTIONS = ["cityid", "stateid", "riverid", "placeid", "countryid"]
ARITY_1 = [
"largest", "smallest", "highest", "lowest", "longest", "shortest", "count",
"sum"
]
ARITY_2 = ["largest_one", "smallest_one"]
ARITY_3 = ["most", "fewest"]
def _split_arguments(span):
"""Splits span into list of spans based on commas."""
argument_buffer = []
arguments = []
open_parens = 0
for char in span:
if char == "," and open_parens == 0:
arguments.append("".join(argument_buffer))
argument_buffer = []
elif char == " " and not argument_buffer:
continue
else:
if char == "(":
open_parens += 1
elif char == ")":
open_parens -= 1
argument_buffer.append(char)
arguments.append("".join(argument_buffer))
return arguments
def _get_name_and_arguments(span):
"""Returns function name and argument sub-expressions."""
span = span.strip()
paren_index = span.find("(")
if paren_index == -1:
raise ValueError("Funql contains no `(`: %s" % span)
name = span[:paren_index]
arguments = span[paren_index + 1:]
if arguments[-1] != ")":
raise ValueError("Invalid arguments string ends with %s: %s" %
(arguments[-1], arguments))
arguments = _split_arguments(arguments[:-1])
return name, arguments
def _convert_function(name, argument_0, arity):
"""Converts a function that contains nested arguments."""
output_arguments = []
inner_funql = argument_0
for _ in range(arity - 1):
nested_argument, arguments = _get_name_and_arguments(inner_funql)
if len(arguments) > 1:
raise ValueError
inner_funql = arguments[0]
output_arguments.append(nested_argument)
output_arguments.append(normalize_funql(inner_funql))
output = "%s(%s)" % (name, ",".join(output_arguments))
return output
def normalize_funql(funql):
"""Recursively parse FunQL string to re-formatted string."""
# Special constant used for "sea level".
if funql == "0":
return "0"
name, arguments = _get_name_and_arguments(funql)
if name == "answer":
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
return "%s(%s)" % (name, normalize_funql(argument_0))
elif name in ENTITY_FUNCTIONS:
return funql
elif name in RELATION_CONSTANTS:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
reformatted_argument_0 = normalize_funql(argument_0)
if not reformatted_argument_0:
raise ValueError("Failed to reformat: %s" % argument_0)
return "%s(%s)" % (name, reformatted_argument_0)
elif name in UNARY_CONSTANTS:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
if argument_0 == "all":
return name
else:
recursive_term = normalize_funql(argument_0)
return "intersection(%s,%s)" % (name, recursive_term)
elif name == "intersection" or name == "exclude":
if len(arguments) != 2:
raise ValueError
argument_0 = arguments[0]
argument_1 = arguments[1]
term_a = normalize_funql(argument_0)
term_b = normalize_funql(argument_1)
return "%s(%s,%s)" % (name, term_a, term_b)
elif name in ARITY_1:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
return _convert_function(name, argument_0, 1)
elif name in ARITY_2:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
return _convert_function(name, argument_0, 2)
elif name in ARITY_3:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
return _convert_function(name, argument_0, 3)
else:
raise ValueError("No match for name: %s" % name)
def restore_funql(funql):
"""Recursively parse FunQL string back to original string."""
# Special constant used for "sea level".
if funql == "0":
return "0"
if funql in UNARY_CONSTANTS:
return "%s(all)" % funql
name, arguments = _get_name_and_arguments(funql)
if name == "answer" or name in RELATION_CONSTANTS or name in ARITY_1:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
return "%s(%s)" % (name, restore_funql(argument_0))
elif name in RELATION_CONSTANTS:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
restored_argument_0 = restore_funql(argument_0)
if not restored_argument_0:
raise ValueError("Failed to restore: %s" % argument_0)
return "%s(%s)" % (name, restored_argument_0)
elif name in ENTITY_FUNCTIONS:
return funql
elif name == "intersection":
if len(arguments) != 2:
raise ValueError
argument_0 = arguments[0]
argument_1 = arguments[1]
term_a = restore_funql(argument_0)
term_b = restore_funql(argument_1)
if argument_0 in UNARY_CONSTANTS:
return "%s(%s)" % (argument_0, restore_funql(argument_1))
if argument_1 in UNARY_CONSTANTS:
raise ValueError
return "%s(%s,%s)" % (name, term_a, term_b)
elif name == "exclude":
if len(arguments) != 2:
raise ValueError
argument_0 = arguments[0]
argument_1 = arguments[1]
term_a = restore_funql(argument_0)
term_b = restore_funql(argument_1)
return "%s(%s,%s)" % (name, term_a, term_b)
elif name in ARITY_2:
if len(arguments) != 2:
raise ValueError("Unexpected number of arguments `%s` for `%s`" %
(arguments, name))
argument_0 = arguments[0]
argument_1 = arguments[1]
return "%s(%s(%s))" % (name, argument_0, restore_funql(argument_1))
elif name in ARITY_3:
if len(arguments) != 3:
raise ValueError
argument_0 = arguments[0]
argument_1 = arguments[1]
argument_2 = arguments[2]
return "%s(%s(%s(%s)))" % (name, argument_0, argument_1,
restore_funql(argument_2))
else:
raise ValueError("No match for name: %s" % name)
def add_space_separation(funql):
"""Split funql and join with space separator."""
separators = "(),"
buffer = ""
symbols = []
for char in funql:
if char in separators:
if buffer:
symbols.append(buffer)
buffer = ""
symbols.append(char)
else:
buffer += char
if buffer:
symbols.append(buffer)
return " ".join(symbols)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/funql_normalization.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Split dataset tsv file based on TMCD methodology."""
import random
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import mcd_utils
from tasks import tsv_utils
from tasks.geoquery import tmcd_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string("output_1", "",
"Output tsv file containing `num_examples_1` examples.")
flags.DEFINE_string("output_2", "",
"Output tsv file containing the remaining examples.")
flags.DEFINE_integer("num_examples_1", 440, "Number of examples for output_1.")
flags.DEFINE_integer("seed", 1, "Seed for splitting examples.")
flags.DEFINE_integer("min_atom_count", 1, "Min occurrences of atoms.")
flags.DEFINE_bool(
"get_atoms_with_num_arguments", False,
"Whether to treat symbols that appear with different numbers "
"of arguments as different atoms.")
def main(unused_argv):
examples = tsv_utils.read_tsv(FLAGS.input)
# First, randomly split examples.
random.seed(FLAGS.seed)
random.shuffle(examples)
examples_1 = examples[:FLAGS.num_examples_1]
examples_2 = examples[FLAGS.num_examples_1:]
# Swap examples to meet atom constraint and maximize compound divergence.
get_atoms_fn = (
tmcd_utils.get_example_atoms_with_num_arguments
if FLAGS.get_atoms_with_num_arguments else tmcd_utils.get_example_atoms)
examples_1, examples_2 = mcd_utils.swap_examples(
examples_1,
examples_2,
get_compounds_fn=tmcd_utils.get_example_compounds,
get_atoms_fn=get_atoms_fn,
max_iterations=1000,
max_divergence=None,
min_atom_count=FLAGS.min_atom_count)
tsv_utils.write_tsv(examples_1, FLAGS.output_1)
tsv_utils.write_tsv(examples_2, FLAGS.output_2)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/gen_tmcd_split.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CFG parser for non-binarized grammars.
The parser uses callbacks so that it can be flexibly extended to various
use cases, such as QCFG parsing.
There are two equivalent implementations, with the Trie variant being a bit
more complicated but faster for most applications, especially for longer inputs.
"""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from common.cky import cky_utils
from common.cky import trie_utils
def parse(input_ids,
rules,
nonterminals,
start_idx,
populate_fn,
postprocess_fn,
use_trie=True,
verbose=False):
"""Run bottom up parser.
Let T be an arbitrary type for chart entries, specified by the return type
  of populate_fn. Examples for T are simple types that simply indicate presence
of a parse for a given span, or more complex structures that represent
parse forests.
Args:
input_ids: List of integers corresponding to idx of terminal CFGSymbols in
rules.
rules: A list of CFGRule instances.
nonterminals: Collection of CFGSymbol objects for possible non-terminals.
start_idx: Index of non-terminal that is start symbol.
    populate_fn: A function that takes: (span_begin (Integer), span_end
(Integer), parser_rule (CFGRule), substitutions (List of T)) and returns
an object of type T, which can be any type. This object is added to the
chart. Depending on what information is desired about completed parses, T
can be anything from a simple count to a complex parse forest object.
postprocess_fn: A function that takes and returns a list of T. This function
post-processes each cell after it has been populated. This function is
useful for pruning the chart, or merging equivalent entries. Ignored if
None.
use_trie: Whether to use the Trie-based parsing algorithm.
verbose: Print debug logging if True.
Returns:
A list of T.
"""
if use_trie:
return trie_utils.parse(input_ids, rules, nonterminals, start_idx,
populate_fn, postprocess_fn, verbose)
else:
return cky_utils.parse(input_ids, rules, nonterminals, start_idx,
populate_fn, postprocess_fn, verbose)
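# Minimal smoke-test sketch for the parser API above. The grammar, symbol ids,
# and callbacks are arbitrary examples: terminal ids {0, 1}, non-terminal ids
# {0: S, 1: X}, and the rules S -> t0 X and X -> t1.
if __name__ == "__main__":
  from common.cky import cfg_rule

  def _populate_fn(unused_span_begin, unused_span_end, rule, unused_children):
    return rule.idx

  example_rules = [
      cfg_rule.CFGRule(idx=0, lhs=0, rhs=(
          cfg_rule.CFGSymbol(0, cfg_rule.TERMINAL),
          cfg_rule.CFGSymbol(1, cfg_rule.NON_TERMINAL))),
      cfg_rule.CFGRule(idx=1, lhs=1, rhs=(
          cfg_rule.CFGSymbol(1, cfg_rule.TERMINAL),)),
  ]
  for example_use_trie in (True, False):
    example_parses = parse([0, 1], example_rules, {0, 1}, 0, _populate_fn,
                           None, use_trie=example_use_trie)
    # Both implementations should find exactly one parse of the full span.
    print(example_use_trie, example_parses)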
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/common/cky/cfg_parser.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements a CFG parser based on a variant of the CKY algorithm.
The parser is naively extended to consider non-binarized rules containing up
to 2 RHS non-terminals and any number of terminals. The runtime for this
naive implementation is therefore O(n^6), which can be too slow for longer
inputs.
"""
import collections
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from common.cky import cfg_rule
def parse(input_ids,
rules,
nonterminals,
start_idx,
populate_fn,
postprocess_fn,
verbose=False):
"""Run bottom up parser using variant of CKY algorithm."""
input_len = len(input_ids)
input_symbols = tuple(
[cfg_rule.CFGSymbol(idx, cfg_rule.TERMINAL) for idx in input_ids])
# Initialize the empty chart.
# Keys are a 3-tuple of Integers: (span_begin, span_end, nonterminal_idx)
# Values are a list of T.
chart = collections.defaultdict(list)
# Index rules by RHS.
rhs_to_rules = collections.defaultdict(list)
for rule in rules:
rhs_to_rules[rule.rhs].append(rule)
# Populate the chart.
for span_end in range(1, input_len + 1):
for span_begin in range(span_end - 1, -1, -1):
# Find matching rules with 0 NTs.
rhs_key_0_nt = input_symbols[span_begin:span_end]
if rhs_key_0_nt in rhs_to_rules:
for rule in rhs_to_rules[rhs_key_0_nt]:
chart[span_begin, span_end,
rule.lhs].append(populate_fn(span_begin, span_end, rule, []))
# Find matching rules with 1 NTs.
for nt_0_start in range(span_begin, span_end):
for nt_0_end in range(nt_0_start + 1, span_end + 1):
for nt_0 in nonterminals:
rhs_key_1_nt = (
input_symbols[span_begin:nt_0_start] +
(cfg_rule.CFGSymbol(nt_0, cfg_rule.NON_TERMINAL),) +
input_symbols[nt_0_end:span_end])
if rhs_key_1_nt in rhs_to_rules:
for node_0 in chart[nt_0_start, nt_0_end, nt_0]:
for rule in rhs_to_rules[rhs_key_1_nt]:
chart[span_begin, span_end, rule.lhs].append(
populate_fn(span_begin, span_end, rule, [node_0]))
# Find matching rules with 2 NTs.
for nt_0_start in range(span_begin, span_end - 1):
for nt_0_end in range(nt_0_start + 1, span_end):
for nt_1_start in range(nt_0_end, span_end):
for nt_1_end in range(nt_1_start + 1, span_end + 1):
for nt_0 in nonterminals:
for nt_1 in nonterminals:
rhs_key_2_nt = (
input_symbols[span_begin:nt_0_start] +
(cfg_rule.CFGSymbol(nt_0, cfg_rule.NON_TERMINAL),) +
input_symbols[nt_0_end:nt_1_start] +
(cfg_rule.CFGSymbol(nt_1, cfg_rule.NON_TERMINAL),) +
input_symbols[nt_1_end:span_end])
if rhs_key_2_nt in rhs_to_rules:
nt_0_index = (nt_0_start, nt_0_end, nt_0)
nt_1_index = (nt_1_start, nt_1_end, nt_1)
for node_0 in chart[nt_0_index]:
for node_1 in chart[nt_1_index]:
for rule in rhs_to_rules[rhs_key_2_nt]:
chart[span_begin, span_end, rule.lhs].append(
populate_fn(span_begin, span_end, rule,
[node_0, node_1]))
if postprocess_fn:
for nt in nonterminals:
chart[span_begin, span_end, nt] = postprocess_fn(chart[span_begin,
span_end, nt])
if verbose:
for nt in nonterminals:
cell = chart[span_begin, span_end, nt]
if cell:
print("Populated (%s,%s): %s - %s" %
(span_begin, span_end, nt, cell))
# Return completed parses.
return chart[(0, input_len, start_idx)]
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/common/cky/cky_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define structures to represent CFG symbols and rules.
For efficiency, all symbols are referenced by integers rather than strings.
This typically requires some pre-processing to define terminal
and non-terminal vocabularies and map symbols to corresponding integers.
"""
import collections
# CFGSymbol type constants.
TERMINAL = 0
NON_TERMINAL = 1
# Represents a TERMINAL or NON_TERMINAL symbol.
CFGSymbol = collections.namedtuple(
"CFGSymbol",
[
"idx", # Integer (considered as separate id spaces for different type).
"type", # Integer (TERMINAL or NON_TERMINAL).
])
# Represents a CFG rule.
CFGRule = collections.namedtuple(
"CFGRule",
[
"idx", # Integer to optionally reference additional rule information.
"lhs", # Integer non-terminal index.
"rhs", # Tuple of >= 1 CFGSymbols.
])
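# Illustrative example of encoding a single rule with the structures above,
# e.g. S -> "river" X, assuming terminal id 7 for "river" and non-terminal
# ids 0 for S and 1 for X (all ids here are arbitrary).
if __name__ == "__main__":
  example_rule = CFGRule(
      idx=0,
      lhs=0,
      rhs=(CFGSymbol(idx=7, type=TERMINAL),
           CFGSymbol(idx=1, type=NON_TERMINAL)))
  print(example_rule)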
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/common/cky/cfg_rule.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements CKY parsing using a Trie data structure to index rules.
This implementation supports non-binarized grammars with rules containing
up to 2 non-terminals.
For each span, rather than enumerating every possible sub-span for up to
2 non-terminals, the algorithm iterates across the span left-to-right and
attempts to match rules stored in a Trie.
"""
import collections
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from common.cky import cfg_rule
class TrieNode(object):
"""Represents a node in a generic Trie data structure."""
def __init__(self, symbol=None):
# The individual symbol associated with this node.
self.symbol = symbol # Can only be None for root.
# Map from symbol to TrieNode.
self.symbol_to_child = {}
    # A list of arbitrarily-typed values associated with this node.
self.values = []
def maybe_add_child(self, symbol):
"""Adds a new node for a given child symbol if not already in Trie."""
if symbol in self.symbol_to_child:
return self.symbol_to_child[symbol]
else:
node = TrieNode(symbol)
self.symbol_to_child[symbol] = node
return node
def maybe_get_child(self, symbol):
return self.symbol_to_child.get(symbol)
def __str__(self):
return "%s %s" % (self.symbol, set(self.symbol_to_child.keys()))
def __repr__(self):
return str(self)
def print_trie(trie_node, indent=0):
"""Recursively prints Trie for debugging purposes."""
print("%s %s" % ("-" * indent, trie_node.symbol))
for value in trie_node.values:
print("%s value: %s" % ("-" * indent, value))
for child in trie_node.symbol_to_child.values():
print_trie(child, indent=indent + 1)
def add_rule_to_trie(trie_root, rule):
current_node = trie_root
for symbol in rule.rhs:
current_node = current_node.maybe_add_child(symbol)
current_node.values.append(rule)
class Chart(object):
"""Represents parse chart state."""
def __init__(self, populate_fn, postprocess_fn):
# The key_map stores chart entries (of type T) indexed by:
# (span_begin, span_end, nonterminal)
self.key_map = collections.defaultdict(list)
# For optimization purposes, we also index chart entries by their
# span_begin index only in start_map.
    # Key is span_begin and value is a set of (span_end, nonterminal) tuples.
self.start_map = collections.defaultdict(set)
# See `cfg_parser.py` for definitions of populate_fn and postprocess_fn.
self.populate_fn = populate_fn
self.postprocess_fn = postprocess_fn
def add(self, span_begin, span_end, rule, children):
"""Add an entry to the chart."""
entry = self.populate_fn(span_begin, span_end, rule, children)
nonterminal = rule.lhs
self.key_map[(span_begin, span_end, nonterminal)].append(entry)
self.start_map[span_begin].add((span_end, nonterminal))
def get_from_key(self, span_begin, span_end, nonterminal):
"""Get entries based on full key."""
return self.key_map[(span_begin, span_end, nonterminal)]
def get_from_start(self, span_begin):
"""Get entries based on start index only."""
return self.start_map[span_begin]
def postprocess(self, span_begin, span_end, nonterminal):
"""Apply postpostprocess_fn to a chart cell."""
if self.postprocess_fn:
self.key_map[(span_begin, span_end, nonterminal)] = self.postprocess_fn(
self.key_map[(span_begin, span_end, nonterminal)])
# For a given span, SearchState represents a potential match with a ParserRule.
SearchState = collections.namedtuple(
"SearchState",
[
"anchored_nonterminals", # List of (span_begin, span_end, nonterminal).
"trie_node", # TrieNode.
])
# The maximum number of RHS non-terminals in ParserRules that are supported.
MAX_NONTERMINALS = 2
def parse(input_ids,
rules,
nonterminals,
start_idx,
populate_fn,
postprocess_fn,
verbose=False):
"""Run bottom up parser using Trie-based implementation."""
input_len = len(input_ids)
input_symbols = tuple(
[cfg_rule.CFGSymbol(idx, cfg_rule.TERMINAL) for idx in input_ids])
# Initialize the empty chart.
chart = Chart(populate_fn, postprocess_fn)
# Initialize Trie of rules.
trie_root = TrieNode()
for rule in rules:
add_rule_to_trie(trie_root, rule)
# Populate the chart.
for span_end in range(1, input_len + 1):
for span_begin in range(span_end - 1, -1, -1):
# Map of span_begin to List of SearchState.
search_map = collections.defaultdict(list)
search_map[span_begin].append(SearchState([], trie_root))
# Iterate across every input token in the span range to find rule matches.
for idx in range(span_begin, span_end):
# End early if there are no remaining candidate matches.
if not search_map[idx]:
continue
terminal_symbol = input_symbols[idx]
# Iterate through partial matches.
while search_map[idx]:
search_state = search_map[idx].pop()
# Consider matching terminal.
new_trie_node = search_state.trie_node.maybe_get_child(
terminal_symbol)
if new_trie_node:
# Found a match for the terminal in the Trie.
# Add a partial match to search_map with idx incremented by 1 token.
new_search_state = SearchState(search_state.anchored_nonterminals,
new_trie_node)
search_map[idx + 1].append(new_search_state)
# Consider matching non-terminal.
nonterminal_tuples = chart.get_from_start(idx)
if len(search_state.anchored_nonterminals) < MAX_NONTERMINALS:
# Iterate through lower chart entries with a completed sub-tree
# that starts at the current index.
for nt_end, nonterminal in nonterminal_tuples:
nonterminal_symbol = cfg_rule.CFGSymbol(nonterminal,
cfg_rule.NON_TERMINAL)
new_trie_node = search_state.trie_node.maybe_get_child(
nonterminal_symbol)
if new_trie_node:
# Found a match for the non-terminal in the Trie.
# Add a partial match to search_map with idx set to the end
# of the sub-tree span.
new_anchored_nonterminals = search_state.anchored_nonterminals[:]
new_anchored_nonterminals.append((idx, nt_end, nonterminal))
search_map[nt_end].append(
SearchState(new_anchored_nonterminals, new_trie_node))
# Loop through search_map for completed matches at span_end.
for search_state in search_map[span_end]:
# Get the ParserRule(s) associated with the particular Trie path.
matched_rules = search_state.trie_node.values
if not matched_rules:
continue
for rule in matched_rules:
# Given the ParserRule and anchored nonterminal positions, generate
          # new chart entries and add them to the chart.
if len(search_state.anchored_nonterminals) == 1:
# Matched rule contains 1 non-terminal.
for child in chart.get_from_key(
*search_state.anchored_nonterminals[0]):
chart.add(span_begin, span_end, rule, [child])
elif len(search_state.anchored_nonterminals) == 2:
# Matched rule contains 2 non-terminals.
for child_0 in chart.get_from_key(
*search_state.anchored_nonterminals[0]):
for child_1 in chart.get_from_key(
*search_state.anchored_nonterminals[1]):
chart.add(span_begin, span_end, rule, [child_0, child_1])
elif len(search_state.anchored_nonterminals) > 2:
raise ValueError
else:
# Matched rule contains 0 non-terminals.
chart.add(span_begin, span_end, rule, [])
for nt in nonterminals:
chart.postprocess(span_begin, span_end, nt)
if verbose:
for nt in nonterminals:
cell = chart.get_from_key(span_begin, span_end, nt)
if cell:
print("Populated (%s,%s): %s - %s" %
(span_begin, span_end, nt, cell))
# Return completed parses.
return chart.get_from_key(0, input_len, start_idx)
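# Illustrative sketch of the Trie indexing used above: store two rules and
# print the resulting structure. Symbol and rule ids are arbitrary examples.
if __name__ == "__main__":
  example_root = TrieNode()
  add_rule_to_trie(example_root, cfg_rule.CFGRule(
      idx=0, lhs=0, rhs=(cfg_rule.CFGSymbol(0, cfg_rule.TERMINAL),
                         cfg_rule.CFGSymbol(1, cfg_rule.NON_TERMINAL))))
  add_rule_to_trie(example_root, cfg_rule.CFGRule(
      idx=1, lhs=1, rhs=(cfg_rule.CFGSymbol(1, cfg_rule.TERMINAL),)))
  print_trie(example_root)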
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/common/cky/trie_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilties for testing."""
from official.nlp.bert import configs
# Tokens used in tests.
_TOKENS = [
"[PAD]", "[CLS]", "[SEP]", "[unused0]", "[unused1]", "foo", "bar", "what",
"river", "traverses", "the", "most", "states", "and"
]
class MockTokenizer(object):
"""Mock tokenizer to replace `tokenization.FullTokenizer` in tests."""
def __init__(self, **kwargs):
del kwargs
self.tokens_to_ids = {
token: token_id for token_id, token in enumerate(_TOKENS)
}
def tokenize(self, input_str):
return input_str.split()
def convert_tokens_to_ids(self, tokens):
return [self.tokens_to_ids[token] for token in tokens]
def get_test_config():
return {
"batch_size": 4,
"learning_rate": 0.001,
"training_steps": 10000,
"warmup_steps": 100,
"steps_per_iteration": 8,
"model_dims": 16,
"max_num_wordpieces": 8,
"max_num_applications": 8,
"max_num_numerator_nodes": 8,
"max_num_denominator_nodes": 8,
"max_num_rules": 8,
}
def get_test_bert_config():
return configs.BertConfig(
vocab_size=32,
hidden_size=8,
intermediate_size=8,
num_attention_heads=2,
num_hidden_layers=2)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/test_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the NQG neural parsing model.
The parsing model consists of a BERT encoder, feed forward layers to
compute vector representations for spans, and an embedding table for rules.
The model produces scores for anchored rule applications, which are based on
the span representations of the anchored span and a learned embedding for each
rule.
Note that the model is implemented in TensorFlow 2.x, based on the TF 2.x BERT
implementation here:
https://github.com/tensorflow/models/tree/master/official/nlp/bert
You can find documentation for downloading or converting BERT checkpoints to
be compatible with this implementation here:
https://github.com/tensorflow/models/tree/master/official/nlp/bert#pre-trained-models
"""
import tensorflow as tf
from official.nlp.bert import bert_models
def _feed_forward(output_dims, hidden_dims, name):
return tf.keras.Sequential([
tf.keras.layers.Dense(hidden_dims, activation="relu", name="%s_1" % name),
tf.keras.layers.Dense(output_dims, name="%s_2" % name)
],
name=name)
class ApplicationScoreLayer(tf.keras.layers.Layer):
"""Layer for computing scores for anchored rule applications.
Span begin and end indexes should both be *inclusive*, i.e.
for a span consisting of a single token the span begin and end indexes
will be the same.
It is up to the caller to establish consistent indexing of rules,
as this layer simply allocates an embedding table of size equal to the
max_num_rules in the config.
"""
def __init__(self, config):
super(ApplicationScoreLayer, self).__init__()
self.feed_forward = _feed_forward(
config["model_dims"], config["model_dims"], name="application_ffn")
self.span_feed_forward = _feed_forward(
1, config["model_dims"], name="span_ffn")
self.rule_embeddings = tf.keras.layers.Embedding(config["max_num_rules"],
config["model_dims"])
self.config = config
def score_application(self, wordpiece_encodings, application_span_begin,
application_span_end, application_rule_idx):
"""Computes scores for a single anchored rule applications.
Args:
wordpiece_encodings: <float>[max_num_wordpieces, bert_dims]
application_span_begin: <int>[1]
application_span_end: <int>[1]
application_rule_idx: <int>[1]
Returns:
application_score: <float>[1]
"""
# <float>[bert_dims]
span_begin_encoding = tf.gather(wordpiece_encodings, application_span_begin)
span_end_encoding = tf.gather(wordpiece_encodings, application_span_end)
# <float>[bert_dims * 2]
span_encoding = tf.concat([span_begin_encoding, span_end_encoding], axis=0)
# <float>[1, bert_dims * 2]
span_encoding = tf.expand_dims(span_encoding, 0)
# <float>[1, model_dims]
span_ffn_encoding = self.feed_forward(span_encoding)
# <float>[model_dims]
application_rule_embedddings = self.rule_embeddings(application_rule_idx)
# <float>[model_dims, 1]
application_rule_embedddings = tf.expand_dims(application_rule_embedddings,
1)
# <float>[1, 1]
application_score = tf.matmul(span_ffn_encoding,
application_rule_embedddings)
# <float>[]
application_score = tf.squeeze(application_score, [0, 1])
# <float>[1, 1]
span_score = self.span_feed_forward(span_encoding)
# <float>[]
span_score = tf.squeeze(span_score, [0, 1])
return application_score + span_score
def call(self, wordpiece_encodings, application_span_begin,
application_span_end, application_rule_idx):
"""Computes scores for a batch of anchored rule applications.
Args:
wordpiece_encodings: <float>[batch_size, max_num_wordpieces, bert_dims]
application_span_begin: <int>[batch_size, max_num_applications]
application_span_end: <int>[batch_size, max_num_applications]
application_rule_idx: <int>[batch_size, max_num_applications]
Returns:
application_scores: <float>[batch_size, max_num_applications]
"""
# <float>[batch_size, max_num_applications, bert_dims]
span_begin_encoding = tf.gather(
wordpiece_encodings, application_span_begin, batch_dims=1)
span_end_encoding = tf.gather(
wordpiece_encodings, application_span_end, batch_dims=1)
# <float>[batch_size, max_num_applications, bert_dims * 2]
span_encodings = tf.concat([span_begin_encoding, span_end_encoding], axis=2)
# <float>[batch_size, max_num_applications, model_dims]
span_encodings_ffn = self.feed_forward(span_encodings)
# <float>[batch_size, max_num_applications, 1, model_dims]
span_encodings_ffn = tf.expand_dims(span_encodings_ffn, 2)
# <float>[batch_size, max_num_applications, model_dims]
application_rule_embedddings = self.rule_embeddings(application_rule_idx)
# <float>[batch_size, max_num_applications, model_dims, 1]
application_rule_embedddings = tf.expand_dims(application_rule_embedddings,
3)
# <float>[batch_size, max_num_applications, 1, 1]
application_scores = tf.matmul(span_encodings_ffn,
application_rule_embedddings)
# <float>[batch_size, max_num_applications]
application_scores = tf.squeeze(application_scores, [2, 3])
# <float>[batch_size, max_num_applications, 1]
span_scores = self.span_feed_forward(span_encodings)
# <float>[batch_size, max_num_applications]
span_scores = tf.squeeze(span_scores, [2])
return application_scores + span_scores
class Model(tf.keras.layers.Layer):
"""Defines NQG neural parsing model."""
def __init__(self, batch_size, config, bert_config, training, verbose=False):
super(Model, self).__init__()
self.config = config
self.bert_encoder = bert_models.get_transformer_encoder(
bert_config, sequence_length=self.config["max_num_wordpieces"])
self.application_score_layer = ApplicationScoreLayer(config)
self.training = training
self.batch_size = batch_size
def call(self, wordpiece_ids_batch, num_wordpieces, application_span_begin,
application_span_end, application_rule_idx):
"""Returns scores for a batch of anchored rule applications.
Args:
wordpiece_ids_batch: <int>[batch_size, max_num_wordpieces]
num_wordpieces: <int>[batch_size, 1]
application_span_begin: <int>[batch_size, max_num_applications]
application_span_end: <int>[batch_size, max_num_applications]
application_rule_idx: <int>[batch_size, max_num_applications]
Returns:
application_scores: <float>[batch_size, max_num_applications]
"""
wordpiece_encodings_batch = self.get_wordpiece_encodings(
wordpiece_ids_batch, num_wordpieces)
application_scores_batch = self.application_score_layer(
wordpiece_encodings_batch, application_span_begin, application_span_end,
application_rule_idx)
return application_scores_batch
def get_wordpiece_encodings(self, wordpiece_ids_batch, num_wordpieces):
"""Returns contextualized encodings for a batch of wordpieces.
Args:
wordpiece_ids_batch: <int>[batch_size, max_num_wordpieces]
num_wordpieces: <int>[batch_size, 1]
Returns:
wordpiece_encodings: <float>[batch_size, max_num_wordpieces, bert_dims]
"""
num_wordpieces = tf.squeeze(num_wordpieces, 1)
bert_input_mask = tf.sequence_mask(
num_wordpieces, self.config["max_num_wordpieces"], dtype=tf.int32)
bert_type_ids = tf.zeros(
shape=[self.batch_size, self.config["max_num_wordpieces"]],
dtype=tf.int32)
wordpiece_encodings_batch, unused_cls_output = self.bert_encoder(
[wordpiece_ids_batch, bert_input_mask, bert_type_ids],
training=self.training)
return wordpiece_encodings_batch
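# Illustrative shape check for ApplicationScoreLayer with a tiny, made-up
# config; the dimensions below are arbitrary and much smaller than a real
# BERT encoder would produce.
if __name__ == "__main__":
  example_config = {"model_dims": 8, "max_num_rules": 4}
  example_layer = ApplicationScoreLayer(example_config)
  example_encodings = tf.zeros([2, 6, 16])          # [batch, wordpieces, bert_dims]
  example_span_begin = tf.zeros([2, 3], dtype=tf.int32)  # [batch, applications]
  example_span_end = tf.ones([2, 3], dtype=tf.int32)
  example_rule_idx = tf.zeros([2, 3], dtype=tf.int32)
  example_scores = example_layer(example_encodings, example_span_begin,
                                 example_span_end, example_rule_idx)
  print(example_scores.shape)  # Expected: (2, 3)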
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/nqg_model.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function for loading config json file."""
import json
from tensorflow.io import gfile
def json_file_to_dict(json_file):
"""Constructs a dictionary from a json file."""
with gfile.GFile(json_file, "r") as reader:
text = reader.read()
return json.loads(text)
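# Illustrative round trip for the helper above, using a throwaway temp file
# and a minimal made-up config.
if __name__ == "__main__":
  import tempfile
  with tempfile.NamedTemporaryFile(
      mode="w", suffix=".json", delete=False) as tmp:
    tmp.write('{"batch_size": 4, "learning_rate": 0.001}')
  print(json_file_to_dict(tmp.name))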
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/config_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run model training.
CPU and parallel GPU training are currently supported; TPU training is not.
"""
import os
from absl import app
from absl import flags
from absl import logging
import sys
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser import config_utils
from model.parser import nqg_model
from model.parser.training import input_utils
from model.parser.training import training_utils
import tensorflow as tf
from official.nlp import optimization
from official.nlp.bert import configs
FLAGS = flags.FLAGS
flags.DEFINE_string(
"input", "",
"TFRecord(s) of tf.Examples (use * for matching multiple files).")
flags.DEFINE_string("model_dir", "", "Directory to save model files.")
flags.DEFINE_string(
"bert_dir", "",
"Directory for BERT, including config and (optionally) checkpoint.")
flags.DEFINE_string("config", "", "Config json file.")
flags.DEFINE_bool("restore_checkpoint", False,
"Whether to restore checkpoint if one exists in model_dir.")
flags.DEFINE_bool(
"init_bert_checkpoint", True,
"If True, init from checkpoint in bert_dir, otherwise use random init.")
flags.DEFINE_bool("use_gpu", False, "Whether to use GPU for training.")
flags.DEFINE_bool("verbose", False, "Whether to print debug output.")
def train_model(strategy):
"""Run model training."""
config = config_utils.json_file_to_dict(FLAGS.config)
dataset_fn = input_utils.get_dataset_fn(FLAGS.input, config)
writer = tf.summary.create_file_writer(os.path.join(FLAGS.model_dir, "train"))
dataset_iterator = iter(
strategy.experimental_distribute_datasets_from_function(dataset_fn))
bert_config = configs.BertConfig.from_json_file(
os.path.join(FLAGS.bert_dir, "bert_config.json"))
logging.info("Loaded BERT config: %s", bert_config.to_dict())
batch_size = int(config["batch_size"] / strategy.num_replicas_in_sync)
logging.info("num_replicas: %s.", strategy.num_replicas_in_sync)
logging.info("per replica batch_size: %s.", batch_size)
with strategy.scope():
model = nqg_model.Model(
batch_size, config, bert_config, training=True, verbose=FLAGS.verbose)
optimizer = optimization.create_optimizer(config["learning_rate"],
config["training_steps"],
config["warmup_steps"])
train_for_n_steps_fn = training_utils.get_train_for_n_steps_fn(
strategy, optimizer, model)
if FLAGS.init_bert_checkpoint:
bert_checkpoint = tf.train.Checkpoint(model=model.bert_encoder)
bert_checkpoint_path = os.path.join(FLAGS.bert_dir, "bert_model.ckpt")
logging.info("Restoring bert checkpoint: %s", bert_checkpoint_path)
logging.info("Bert vars: %s", model.bert_encoder.trainable_variables)
logging.info("Checkpoint vars: %s",
tf.train.list_variables(bert_checkpoint_path))
status = bert_checkpoint.restore(bert_checkpoint_path).expect_partial()
status.assert_existing_objects_matched()
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
current_step = 0
if FLAGS.restore_checkpoint:
latest_checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir)
# TODO(petershaw): This is a hacky way to read current step.
current_step = int(latest_checkpoint.split("-")[-2])
logging.info("Restoring %s at step %s.", latest_checkpoint, current_step)
status = checkpoint.restore(latest_checkpoint)
status.assert_existing_objects_matched()
with writer.as_default():
while current_step < config["training_steps"]:
logging.info("current_step: %s.", current_step)
mean_loss = train_for_n_steps_fn(
dataset_iterator,
tf.convert_to_tensor(config["steps_per_iteration"], dtype=tf.int32))
tf.summary.scalar("loss", mean_loss, step=current_step)
current_step += config["steps_per_iteration"]
if current_step and current_step % config["save_checkpoint_every"] == 0:
checkpoint_prefix = os.path.join(FLAGS.model_dir,
"ckpt-%s" % current_step)
logging.info("Saving checkpoint to %s.", checkpoint_prefix)
checkpoint.save(file_prefix=checkpoint_prefix)
def main(unused_argv):
if FLAGS.use_gpu:
strategy = tf.distribute.MirroredStrategy()
logging.info("Number of devices: %d", strategy.num_replicas_in_sync)
train_model(strategy)
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
train_model(strategy)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/training/train_model.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for iterating over serialized parse forests in TensorFlow."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser.data import data_constants
import tensorflow as tf
def get_forest_score_function(verbose=False):
"""Return forest_score_function."""
# TODO(petershaw): In order to use TPU, it is likely necessary to consider
# max_num_nodes as another input argument to initialize the arrays and
# while loop.
# However, this appears to still be insufficient for TPU compilation,
# so this requires further investigation.
@tf.function
def forest_score_function(application_scores, num_nodes, node_type_list,
node_1_idx_list, node_2_idx_list,
node_application_idx_list):
"""Iterate over nodes in forest and return score for root.
Note that the returned score is not exponentiated, i.e. it is equivalent to
the log of the sum of the exponentiated scores for individual parses in
the forest:
log(sum over parses(exp(sum over applications in parse(application score))))
This function benefits from dynamic programming to compute this sum more
efficiently.
Also note that input arguments should not be batched. This function could
potentially be made more efficient by implementing a batched version of
this computation. However, the computation in this function is limited to:
1. Control flow (while loop) and TensorArray read/write operations
2. Gather operations over application_scores
    3. Summation and logsumexp
So the overall amount of computation should be small relative to
large encoders and computation of application_scores. Using an
implementation that is not batched also allows for returning early
for examples where the number of nodes is less than the maximum limit.
Args:
application_scores: <float>[max_num_applications] of raw scores (not
exponentiated) for anchored rule applications.
num_nodes: Integer number of nodes. By convention, the final non-padding
node is the root node and should correspond to the `num_nodes - 1` index
of the 4 `node_x` input tensors below.
node_type_list: <int>[max_num_nodes].
node_1_idx_list: <int>[max_num_nodes].
node_2_idx_list: <int>[max_num_nodes].
node_application_idx_list: <int>[max_num_nodes].
Returns:
Score for root node (see description above).
"""
if verbose:
tf.print("application_scores:", application_scores, summarize=1000)
# Write once / read array storing scores for each node.
# Note that the scores are not exponentiated.
node_array = tf.TensorArray(
tf.float32,
size=num_nodes,
dynamic_size=False,
clear_after_read=False,
element_shape=[])
# Return early, i.e. iterate only for num_nodes not max_num_nodes.
for idx in tf.range(num_nodes):
node_type = node_type_list[idx]
node_1_idx = node_1_idx_list[idx]
node_2_idx = node_2_idx_list[idx]
node_application_idx = node_application_idx_list[idx]
if verbose:
tf.print("idx:", idx)
tf.print("node_type:", node_type)
tf.print("node_1_idx:", node_1_idx)
tf.print("node_2_idx:", node_2_idx)
tf.print("node_application_idx:", node_application_idx)
if node_type == data_constants.RULE_APPLICATION:
score = 0.0
# All rule application nodes are associated with some application
# score.
score += application_scores[node_application_idx]
# Additionally, we add the scores for any children.
if node_1_idx != -1:
score += node_array.read(node_1_idx)
if node_2_idx != -1:
score += node_array.read(node_2_idx)
node_array = node_array.write(idx, score)
if verbose:
tf.print("Write RULE_APPLICATION node: ", idx, score)
elif node_type == data_constants.AGGREGATION:
# Merge nodes for common sub-trees.
node_1_score = node_array.read(node_1_idx)
node_2_score = node_array.read(node_2_idx)
# Use logsumexp trick for stable calculation.
score = tf.math.reduce_logsumexp(tf.stack([node_1_score, node_2_score]))
node_array = node_array.write(idx, score)
if verbose:
tf.print("Write AGGREGATION node: ", idx, score)
# Return final score (note that it is not exponentiated).
return node_array.read(num_nodes - 1)
return forest_score_function
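# Illustrative sketch, not part of the original module: it shows how the
# AGGREGATION branch above merges the log-scores of two alternative derivations
# with logsumexp, i.e. log(exp(a) + exp(b)), without ever exponentiating the
# individual scores. The values below are arbitrary example numbers.
def _logsumexp_merge_demo():
  a = tf.constant(1.5)  # Hypothetical log-score of derivation 1.
  b = tf.constant(0.3)  # Hypothetical log-score of derivation 2.
  # Equivalent to log(exp(1.5) + exp(0.3)), computed in a numerically stable way.
  return tf.math.reduce_logsumexp(tf.stack([a, b]))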
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/training/forest_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to define model training loop."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser.training import forest_utils
import tensorflow as tf
def get_training_step(optimizer, model, verbose=False):
"""Get training step function."""
forest_score_function = forest_utils.get_forest_score_function(
verbose=verbose)
def training_step(inputs):
"""Executes a step of training."""
with tf.GradientTape() as tape:
loss = tf.constant(0.0, dtype=tf.float32)
application_scores_batch = model(inputs["wordpiece_ids"],
inputs["num_wordpieces"],
inputs["application_span_begin"],
inputs["application_span_end"],
inputs["application_rule_idx"])
nu_num_nodes_batch = tf.squeeze(inputs["nu_num_nodes"], 1)
de_num_nodes_batch = tf.squeeze(inputs["de_num_nodes"], 1)
with tf.name_scope("forest_score"):
# TODO(petershaw): Consider a batched implementation of
# forest_score_function to avoid iteration over examples in the batch.
for idx in tf.range(model.batch_size):
application_scores = application_scores_batch[idx]
nu_node_type = inputs["nu_node_type"][idx]
nu_node_1_idx = inputs["nu_node_1_idx"][idx]
nu_node_2_idx = inputs["nu_node_2_idx"][idx]
nu_application_idx = inputs["nu_application_idx"][idx]
nu_num_nodes = nu_num_nodes_batch[idx]
# Log score for numerator (sum over derivations of target).
nu_score = forest_score_function(application_scores, nu_num_nodes,
nu_node_type, nu_node_1_idx,
nu_node_2_idx, nu_application_idx)
de_node_type = inputs["de_node_type"][idx]
de_node_1_idx = inputs["de_node_1_idx"][idx]
de_node_2_idx = inputs["de_node_2_idx"][idx]
de_application_idx = inputs["de_application_idx"][idx]
de_num_nodes = de_num_nodes_batch[idx]
# Log score for denominator (partition function).
de_score = forest_score_function(application_scores, de_num_nodes,
de_node_type, de_node_1_idx,
de_node_2_idx, de_application_idx)
# -log(numerator/denominator) = log(denominator) - log(numerator)
example_loss = de_score - nu_score
loss += example_loss
loss /= tf.cast(model.batch_size, dtype=tf.float32)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return loss
return training_step
def get_train_for_n_steps_fn(strategy, optimizer, model):
"""Return train_for_n_steps_fn."""
training_step = get_training_step(optimizer, model)
@tf.function
def train_for_n_steps_fn(iterator, steps):
mean_loss = tf.constant(0.0, dtype=tf.float32)
for _ in tf.range(steps):
inputs = next(iterator)
loss = strategy.run(training_step, args=(inputs,))
mean_loss += strategy.reduce(tf.distribute.ReduceOp.MEAN, loss, axis=None)
mean_loss /= tf.cast(steps, dtype=tf.float32)
return mean_loss
return train_for_n_steps_fn
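# Illustrative sketch, not part of the original module: the per-example loss in
# training_step above is -log(numerator / denominator), computed directly in
# log space as de_score - nu_score. The check below uses arbitrary example
# values to show that the two forms agree.
def _loss_identity_demo():
  import math
  nu_score = 2.0  # Hypothetical log-score summed over derivations of the target.
  de_score = 2.3  # Hypothetical log partition function (all derivations).
  loss_log_space = de_score - nu_score
  loss_direct = -math.log(math.exp(nu_score) / math.exp(de_score))
  assert abs(loss_log_space - loss_direct) < 1e-9
  return loss_log_space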
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/training/training_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for input pipeline.
The input pipeline should be both GPU and TPU friendly.
"""
import tensorflow as tf
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but tf.int32 can be faster and more
# memory efficient on certain hardware.
for name in list(example.keys()):
tensor = example[name]
if tensor.dtype == tf.int64:
tensor = tf.cast(tensor, dtype=tf.int32)
example[name] = tensor
return example
def _create_int_feature(length):
return tf.io.FixedLenFeature([length], tf.int64)
def create_training_dataset(input_file, batch_size, config):
"""Returns `tf.data.Dataset` for training."""
name_to_features = {}
name_to_features["wordpiece_ids"] = _create_int_feature(
config["max_num_wordpieces"])
name_to_features["num_wordpieces"] = _create_int_feature(1)
name_to_features["application_span_begin"] = _create_int_feature(
config["max_num_applications"])
name_to_features["application_span_end"] = _create_int_feature(
config["max_num_applications"])
name_to_features["application_rule_idx"] = _create_int_feature(
config["max_num_applications"])
name_to_features["nu_node_type"] = _create_int_feature(
config["max_num_numerator_nodes"])
name_to_features["nu_node_1_idx"] = _create_int_feature(
config["max_num_numerator_nodes"])
name_to_features["nu_node_2_idx"] = _create_int_feature(
config["max_num_numerator_nodes"])
name_to_features["nu_application_idx"] = _create_int_feature(
config["max_num_numerator_nodes"])
name_to_features["nu_num_nodes"] = _create_int_feature(1)
name_to_features["de_node_type"] = _create_int_feature(
config["max_num_denominator_nodes"])
name_to_features["de_node_1_idx"] = _create_int_feature(
config["max_num_denominator_nodes"])
name_to_features["de_node_2_idx"] = _create_int_feature(
config["max_num_denominator_nodes"])
name_to_features["de_application_idx"] = _create_int_feature(
config["max_num_denominator_nodes"])
name_to_features["de_num_nodes"] = _create_int_feature(1)
if "*" in input_file:
# Potentially match multiple input files.
files = tf.io.matching_files(input_file)
files = tf.random.shuffle(files)
shards = tf.data.Dataset.from_tensor_slices(files)
dataset = shards.interleave(tf.data.TFRecordDataset)
else:
# Only using single input file.
dataset = tf.data.TFRecordDataset(input_file)
dataset = dataset.repeat()
dataset = dataset.shuffle(buffer_size=1000)
decode_fn = lambda record: _decode_record(record, name_to_features)
dataset = dataset.map(
decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Send the single file to all workers.
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
tf.data.experimental.AutoShardPolicy.OFF)
dataset = dataset.with_options(options)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(1024)
return dataset
def get_dataset_fn(input_file, config):
"""Gets a closure to create a dataset.."""
global_batch_size = config["batch_size"]
def dataset_fn(ctx=None):
"""Returns tf.data.Dataset for distributed BERT pretraining."""
batch_size = ctx.get_per_replica_batch_size(
global_batch_size) if ctx else global_batch_size
dataset = create_training_dataset(input_file, batch_size, config)
return dataset
return dataset_fn
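# Illustrative usage sketch, not part of the original module; it mirrors how
# train_model.py consumes get_dataset_fn. `input_pattern` and `config` are
# assumed to be provided by the caller, with config containing "batch_size"
# and the max_num_* sizes read by create_training_dataset.
def _example_usage(input_pattern, config):
  strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
  dataset_fn = get_dataset_fn(input_pattern, config)
  dataset_iterator = iter(
      strategy.experimental_distribute_datasets_from_function(dataset_fn))
  return next(dataset_iterator)  # One batch of decoded features.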
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/training/input_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating one-best targets given neural scoring model."""
import collections
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.qcfg import qcfg_parser
from model.qcfg import qcfg_rule
ScoredAnchoredRuleApplication = collections.namedtuple(
"ScoredAnchoredRuleApplication",
[
"rule", # QCFGRule.
"span_begin", # Integer.
"span_end", # Integer.
"score", # Float.
])
class ScoredChartNode(object):
"""Represents node in chart."""
def __init__(self, score_fn, span_begin, span_end, rule, children):
# Get score.
application_score = score_fn(rule, span_begin, span_end)
self.score = application_score
for node in children:
self.score += node.score
# Get target string.
target_string = qcfg_rule.apply_target(
rule, [node.target_string for node in children])
self.target_string = target_string
application = ScoredAnchoredRuleApplication(rule, span_begin, span_end,
application_score)
# List of ScoredAnchoredRuleApplication, which can be used to inspect
# parse tree for a given prediction.
self.applications = [application]
for node in children:
for application in node.applications:
self.applications.append(application)
def __str__(self):
return "%s (%s) [%s]" % (self.target_string, self.score, self.applications)
def __repr__(self):
return self.__str__()
def get_node_fn(score_fn):
"""Return node_fn."""
def node_fn(span_begin, span_end, rule, children):
return ScoredChartNode(score_fn, span_begin, span_end, rule, children)
return node_fn
def postprocess_cell_fn(nodes):
if not nodes:
return []
# Prune all nodes except the highest scoring node.
sorted_nodes = sorted(nodes, key=lambda x: -x.score)
return [sorted_nodes[0]]
def run_inference(source, rules, score_fn):
"""Determine one-best parse using score_fn.
Args:
source: Input string.
rules: Set of QCFGRules.
score_fn: Function with inputs (rule, span_begin, span_end) and returns
float score for a given anchored rule application. Note that `span_begin`
and `span_end` refer to token indexes, where span_end is exclusive, and
`rule` is a QCFGRule.
Returns:
(target string, score) for highest scoring derivation, or (None, None)
if there is no derivation for given source.
"""
tokens = source.split(" ")
node_fn = get_node_fn(score_fn)
nodes = qcfg_parser.parse(
tokens, rules, node_fn=node_fn, postprocess_cell_fn=postprocess_cell_fn)
if not nodes:
return None, None
if len(nodes) > 1:
raise ValueError("Multiple nodes returned for inference: %s" % nodes)
return nodes[0].target_string, nodes[0].score
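# Minimal usage sketch, not part of the original module. With the uniform
# score function below every anchored application scores 0.0, so run_inference
# returns some target derivable from the rules (or (None, None) if the source
# cannot be parsed). `rules` is assumed to be a set of QCFGRules, e.g. loaded
# with qcfg_file.read_rules.
def _uniform_score_fn(rule, span_begin, span_end):
  del rule, span_begin, span_end  # Unused: all applications score equally.
  return 0.0
def _example_inference(source, rules):
  return run_inference(source, rules, _uniform_score_fn)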
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/inference/inference_parser.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary to generate predicted targets given input txt file of sources.
An input txt file of sources can be generated from a TSV file using
the `nqg/tasks/strip_targets.py` script.
This binary also supports evaluations for settings such as NQG-T5, where
predictions from T5 are used when NQG does not produce an output. Such
'fallback' predictions can be supplied via the `--fallback_predictions` flag.
"""
import os
import pdb
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser import config_utils
from model.parser.data import tokenization_utils
from model.parser.inference import inference_wrapper
from model.parser.inference.targets import target_grammar
from model.qcfg import qcfg_file
import tensorflow as tf
from official.nlp.bert import configs
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input txt file for sources.")
flags.DEFINE_string("output", "", "Output txt file for predicted targets.")
flags.DEFINE_bool("verbose", True, "Whether to print debug output.")
flags.DEFINE_string("model_dir", "", "Model directory.")
flags.DEFINE_string("checkpoint", "", "Checkpoint prefix, or None for latest.")
flags.DEFINE_string("config", "", "Config file.")
flags.DEFINE_string(
"bert_dir", "",
"Directory for BERT vocab, config, and (optionally) checkpoint.")
flags.DEFINE_string("rules", "", "QCFG rules txt file.")
flags.DEFINE_string("fallback_predictions", "",
"Optional fallback predictions txt file.")
flags.DEFINE_string("target_grammar", "", "Optional target CFG.")
def get_checkpoint():
if FLAGS.checkpoint:
return os.path.join(FLAGS.model_dir, FLAGS.checkpoint)
else:
return tf.train.latest_checkpoint(FLAGS.model_dir)
def get_inference_wrapper(config):
"""Construct and return InferenceWrapper."""
rules = qcfg_file.read_rules(FLAGS.rules)
tokenizer = tokenization_utils.get_tokenizer(
os.path.join(FLAGS.bert_dir, "vocab.txt"))
bert_config = configs.BertConfig.from_json_file(
os.path.join(FLAGS.bert_dir, "bert_config.json"))
target_grammar_rules = None
if FLAGS.target_grammar:
target_grammar_rules = target_grammar.load_rules_from_file(
FLAGS.target_grammar)
wrapper = inference_wrapper.InferenceWrapper(tokenizer, rules, config,
bert_config,
target_grammar_rules)
# Restore checkpoint.
checkpoint = get_checkpoint()
print("Loading from checkpoint: %s" % checkpoint)
wrapper.restore_checkpoint(checkpoint)
return wrapper
def get_predicted_target(wrapper, source, fallback_prediction):
nqg_prediction, _ = wrapper.get_output(source)
if nqg_prediction is None:
return fallback_prediction
else:
return nqg_prediction
def get_fallback_predictions(sources):
"""Return List of fallback predictions or List of `None` if not provided."""
if FLAGS.fallback_predictions:
fallback_predictions = []
with tf.io.gfile.GFile(FLAGS.fallback_predictions, "r") as predictions_file:
for line in predictions_file:
fallback_predictions.append(line.rstrip())
if len(sources) != len(fallback_predictions):
raise ValueError(
"Number of inputs != number of fallback predictions: %s vs. %s." %
(len(sources), len(fallback_predictions)))
return fallback_predictions
else:
return [None] * len(sources)
def main(unused_argv):
config = config_utils.json_file_to_dict(FLAGS.config)
wrapper = get_inference_wrapper(config)
sources = []
with tf.io.gfile.GFile(FLAGS.input, "r") as input_file:
for line in input_file:
sources.append(line.rstrip())
fallback_predictions = get_fallback_predictions(sources)
with tf.io.gfile.GFile(FLAGS.output, "w") as output_file:
for source, fallback_prediction in zip(sources, fallback_predictions):
try:
predicted_target = get_predicted_target(wrapper, source,
fallback_prediction)
      except Exception:
predicted_target = None
output_file.write("%s\n" % predicted_target)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/inference/generate_predictions.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for generating predictions with NQG model."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser import nqg_model
from model.parser.data import example_converter
from model.parser.data import tokenization_utils
from model.parser.inference import inference_parser
from model.parser.inference.targets import target_grammar
import tensorflow as tf
import pdb
def _convert_to_int_tensor(values, padded_length):
if len(values) > padded_length:
raise ValueError("length %s is > %s" % (len(values), padded_length))
for _ in range(len(values), padded_length):
values.append(0)
# Add outer dimension for batch size of 1.
feature = tf.convert_to_tensor([values])
return feature
def _get_score_fn(wordpiece_encodings, rules, model, token_start_wp_idx,
token_end_wp_idx):
"""Return score_fn."""
# Assigns same rule to idx mapping as used for training.
rule_key_to_idx_map = example_converter.get_rule_to_idx_map(rules)
def score_fn(rule, span_begin, span_end):
"""Returns scalar score for anchored rule application."""
application_span_begin = token_start_wp_idx[span_begin]
# Need to convert between token index used by QCFG rules,
# and wordpiece indexes used by neural model.
# token_end_wp_idx is an *inclusive* idx.
# span_end is an *exclusive* idx.
# application_span_end is an *inclusive* idx.
application_span_end = token_end_wp_idx[span_end - 1]
application_rule_idx = rule_key_to_idx_map[rule]
application_score = model.application_score_layer.score_application(
wordpiece_encodings, application_span_begin, application_span_end,
application_rule_idx)
return application_score.numpy()
return score_fn
class InferenceWrapper(object):
"""Provides interface for inference."""
def __init__(self,
tokenizer,
rules,
config,
bert_config,
target_grammar_rules=None,
verbose=False):
self.tokenizer = tokenizer
self.config = config
self.batch_size = 1
self.model = nqg_model.Model(
self.batch_size, config, bert_config, training=False)
self.checkpoint = tf.train.Checkpoint(model=self.model)
self.rules = rules
self.target_grammar_rules = target_grammar_rules
self.verbose = verbose
def restore_checkpoint(self, latest_checkpoint):
"""Restore model parameters from checkpoint."""
status = self.checkpoint.restore(latest_checkpoint)
status.assert_existing_objects_matched()
print("Restored checkpoint: %s" % latest_checkpoint)
def get_output(self, source):
"""Returns (one-best target string, score) or (None, None)."""
# Tokenize.
tokens = source.split(" ")
(wordpiece_ids, num_wordpieces, token_start_wp_idx,
token_end_wp_idx) = tokenization_utils.get_wordpiece_inputs(
         tokens, self.tokenizer,
         max_num_wordpiece=self.config["max_num_wordpieces"])
# pdb.set_trace()
wordpieces_batch = _convert_to_int_tensor(wordpiece_ids,
self.config["max_num_wordpieces"])
# Run encoder.
wordpiece_encodings_batch = self.model.get_wordpiece_encodings(
wordpieces_batch, [[num_wordpieces]])
wordpiece_encodings = wordpiece_encodings_batch[0]
# Create score_fn.
score_fn = _get_score_fn(wordpiece_encodings, self.rules, self.model,
token_start_wp_idx, token_end_wp_idx)
# Run parser.
target_string, score = inference_parser.run_inference(
source, self.rules, score_fn)
# Validate target if target CFG provided.
if (target_string and self.target_grammar_rules and
not target_grammar.can_parse(target_string, self.target_grammar_rules)):
if self.verbose:
print("Invalid target: %s" % target_string)
return None, None
return target_string, score
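# Illustrative usage sketch, not part of the original module, mirroring
# generate_predictions.py: build the wrapper, restore a checkpoint, and decode
# a single source string. All arguments (tokenizer, rules, config, bert_config,
# checkpoint_path, source) are assumed to be prepared as in that binary.
def _example_decode(tokenizer, rules, config, bert_config, checkpoint_path,
                    source):
  wrapper = InferenceWrapper(tokenizer, rules, config, bert_config)
  wrapper.restore_checkpoint(checkpoint_path)
  return wrapper.get_output(source)  # (target string, score) or (None, None).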
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/inference/inference_wrapper.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary to evaluate model.
This binary can also be configured to run alongside a training job
and poll for new model checkpoints, writing eval metrics (e.g. for TensorBoard).
This binary also supports evaluations for settings such as NQG-T5, where
predictions from T5 are used when NQG does not produce an output. Such
'fallback' predictions can be supplied via the `--fallback_predictions` flag.
"""
import os
import time
import pdb
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser import config_utils
from model.parser.data import tokenization_utils
from model.parser.inference import inference_wrapper
from model.parser.inference.targets import target_grammar
from model.qcfg import qcfg_file
from tasks import tsv_utils
import tensorflow as tf
from official.nlp.bert import configs
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_integer("limit", 0,
"Index of example to begin processing (Ignored if 0).")
flags.DEFINE_integer("offset", 0,
"Index of example to end processing (Ignored if 0).")
flags.DEFINE_bool("verbose", True, "Whether to print debug output.")
flags.DEFINE_string("model_dir", "", "Model directory.")
flags.DEFINE_bool("poll", False, "Whether to poll.")
flags.DEFINE_bool("write", False, "Whether to write metrics to model_dir.")
flags.DEFINE_string("subdir", "eval_test",
"Sub-directory of model_dir for writing metrics.")
flags.DEFINE_string("checkpoint", "", "Checkpoint prefix, or None for latest.")
flags.DEFINE_string("config", "", "Config file.")
flags.DEFINE_string("bert_dir", "",
"Directory for BERT, including vocab and config.")
flags.DEFINE_string("rules", "", "QCFG rules txt file.")
flags.DEFINE_string("fallback_predictions", "",
"Optional fallback predictions txt file.")
flags.DEFINE_string("target_grammar", "", "Optional target CFG.")
def compute_metrics(wrapper, examples):
"""Compute accuracy on examples."""
# Initialize stats.
num_examples = 0
num_nqg_correct = 0
num_nqg_predictions = 0
num_fallback_correct = 0
num_hybrid_correct = 0
# pdb.set_trace()
fallback_predictions = None
if FLAGS.fallback_predictions:
fallback_predictions = []
    print("Prediction file: ", FLAGS.fallback_predictions)
with tf.io.gfile.GFile(FLAGS.fallback_predictions, "r") as predictions_file:
for line in predictions_file:
fallback_predictions.append(line.rstrip())
for idx, example in enumerate(examples):
if FLAGS.offset and idx < FLAGS.offset:
continue
if FLAGS.limit and idx >= FLAGS.limit:
break
if FLAGS.verbose:
print("Processing example %s: %s" % (idx, example[0]))
num_examples += 1
source = example[0]
gold_target = example[1]
nqg_prediction, _ = wrapper.get_output(source)
# try:
# nqg_prediction, _ = wrapper.get_output(source)
# except:
# # The model cannot hande wordpieces that are too long
# # Skip the ones that are longer than max wordpieces
# nqg_prediction = None
if nqg_prediction:
num_nqg_predictions += 1
if nqg_prediction is not None and nqg_prediction.replace(" ", "") == gold_target.replace(" ", ""):
num_nqg_correct += 1
else:
if FLAGS.verbose:
print("nqg incorrect (gold vs. predicted):\n%s\n%s\n" %
(gold_target, nqg_prediction))
fallback_prediction = (
fallback_predictions[idx] if fallback_predictions else None)
if fallback_prediction is not None and fallback_prediction.replace(" ", "") == gold_target.replace(" ", ""):
num_fallback_correct += 1
else:
if FLAGS.verbose:
print("fallback incorrect (gold vs. predicted):\n%s\n%s\n" %
(gold_target, fallback_prediction))
hybrid_prediction = nqg_prediction or fallback_prediction
if hybrid_prediction is None:
print("None hybrid prediction, fallback pred: ", fallback_prediction)
if hybrid_prediction is not None and hybrid_prediction.replace(" ", "") == gold_target.replace(" ", ""):
num_hybrid_correct += 1
if FLAGS.verbose:
print("hybrid correct.")
else:
if FLAGS.verbose:
print("hybrid incorrect.")
metrics_dict = {
"nqg_accuracy": float(num_nqg_correct) / float(num_examples),
"fallback_accuracy": float(num_fallback_correct) / float(num_examples),
"hybrid_accuracy": float(num_hybrid_correct) / float(num_examples),
"nqg_coverage": float(num_nqg_predictions) / float(num_examples),
"nqg_precision": float(num_nqg_correct) / float(num_nqg_predictions) if num_nqg_predictions != 0 else 0,
}
if FLAGS.verbose:
print("num_examples: %s" % num_examples)
print("num_nqg_correct: %s" % num_nqg_correct)
print("num_nqg_predictions: %s" % num_nqg_predictions)
print("num_fallback_correct: %s" % num_fallback_correct)
print("num_hybrid_correct: %s" % num_hybrid_correct)
print("metrics_dict: %s" % metrics_dict)
return metrics_dict
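# Minimal sketch, not part of the original module, of the hybrid rule used
# above: the NQG prediction is preferred when it exists, otherwise the fallback
# (e.g. T5) prediction is used. The values below are hypothetical.
def _hybrid_prediction_demo():
  nqg_prediction = None  # NQG produced no output for this example.
  fallback_prediction = "select * from singer"  # Hypothetical fallback.
  hybrid_prediction = nqg_prediction or fallback_prediction
  return hybrid_prediction  # Falls back to the T5-style prediction.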
def get_summary_writer():
if not FLAGS.write:
return None
return tf.summary.create_file_writer(
os.path.join(FLAGS.model_dir, FLAGS.subdir))
def write_metric(writer, name, metric, step):
with writer.as_default():
tf.summary.scalar(name, metric, step=step)
def get_checkpoint():
"""Return checkpoint path and step, or (None, None)."""
if FLAGS.checkpoint:
checkpoint = os.path.join(FLAGS.model_dir, FLAGS.checkpoint)
else:
checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir)
# TODO(petershaw): Consider less hacky way to get current step.
step = None
if checkpoint is not None:
step = int(checkpoint.split("-")[-2])
print("Using checkpoint %s at step %s" % (checkpoint, step))
return checkpoint, step
def get_inference_wrapper(config):
"""Construct and return InferenceWrapper."""
rules = qcfg_file.read_rules(FLAGS.rules)
tokenizer = tokenization_utils.get_tokenizer(
os.path.join(FLAGS.bert_dir, "vocab.txt"))
bert_config = configs.BertConfig.from_json_file(
os.path.join(FLAGS.bert_dir, "bert_config.json"))
target_grammar_rules = None
if FLAGS.target_grammar:
target_grammar_rules = target_grammar.load_rules_from_file(
FLAGS.target_grammar)
wrapper = inference_wrapper.InferenceWrapper(tokenizer, rules, config,
bert_config,
target_grammar_rules)
return wrapper
def run_inference(writer, wrapper, examples, checkpoint, step=None):
"""Run inference."""
wrapper.restore_checkpoint(checkpoint)
metrics_dict = compute_metrics(wrapper, examples)
for metric_name, metric_value in metrics_dict.items():
print("%s at %s: %s" % (metric_name, step, metric_value))
if FLAGS.write:
write_metric(writer, metric_name, metric_value, step)
def main(unused_argv):
config = config_utils.json_file_to_dict(FLAGS.config)
wrapper = get_inference_wrapper(config)
examples = tsv_utils.read_tsv(FLAGS.input)
writer = get_summary_writer()
if FLAGS.poll:
last_checkpoint = None
while True:
checkpoint, step = get_checkpoint()
if checkpoint == last_checkpoint:
print("Waiting for new checkpoint...\nLast checkpoint: %s" %
last_checkpoint)
else:
run_inference(writer, wrapper, examples, checkpoint, step=step)
last_checkpoint = checkpoint
if step and step >= config["training_steps"]:
# Stop eval job after completing eval for last training step.
break
time.sleep(10)
else:
checkpoint, _ = get_checkpoint()
run_inference(writer, wrapper, examples, checkpoint)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/inference/eval_model.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to validate that targets are properly constructed.
The input is a CFG defining valid target constructions for a given task.
This can be viewed as a loose check that the target would be executable
for a given formalism and database.
This can be useful for NQG, which can otherwise over-generate syntactically
invalid targets as the grammars are restricted to a single non-terminal symbol.
"""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from common.cky import cfg_parser
from common.cky import cfg_rule
from tensorflow.io import gfile
# Used for string formatting.
NON_TERMINAL_PREFIX = "##"
ARROW = "=>"
# Root non-terminal symbol.
ROOT_SYMBOL = "ROOT"
# Special non-terminal that can match any terminal sequence.
ANYTHING = "ANYTHING"
class TargetCfgRule(object):
"""Represents a rule."""
def __init__(self, lhs, rhs):
self.lhs = lhs # String.
self.rhs = rhs # String.
def __str__(self):
return "%s %s %s" % (self.lhs, ARROW, self.rhs)
def __repr__(self):
return str(self)
@classmethod
def from_string(cls, rule_string):
symbols = rule_string.split(" ")
if symbols[1] != ARROW:
raise ValueError("Invalid rule_string: %s." % rule_string)
lhs = symbols[0]
rhs = " ".join(symbols[2:])
return cls(lhs, rhs)
def rules_to_txt_file(rules, filename):
"""Write rules to txt file."""
with gfile.GFile(filename, "w") as rule_file:
for rule in rules:
rule_file.write("%s\n" % str(rule))
print("Wrote %s rules to %s." % (len(rules), filename))
def load_rules_from_file(filename):
"""Load list of TargetCfgRules from txt file."""
rules = []
with gfile.GFile(filename, "r") as rule_file:
for line in rule_file:
      line = line.rstrip()
      # Allow blank lines and comment lines in grammar files starting with '#'.
      if line and not line.startswith("#"):
rule = TargetCfgRule.from_string(line)
rules.append(rule)
print("Loaded %s rules from %s." % (len(rules), filename))
return rules
def _convert_to_parser_rule(rule, terminals_to_ids, nonterminals_to_ids,
rule_idx):
"""Convert Rule to CFGRule."""
rhs = []
for token in rule.rhs.split(" "):
if token.startswith(NON_TERMINAL_PREFIX):
symbol_idx = nonterminals_to_ids[token[len(NON_TERMINAL_PREFIX):]]
rhs.append(cfg_rule.CFGSymbol(idx=symbol_idx, type=cfg_rule.NON_TERMINAL))
else:
if token not in terminals_to_ids:
return None
symbol_idx = terminals_to_ids[token]
rhs.append(cfg_rule.CFGSymbol(idx=symbol_idx, type=cfg_rule.TERMINAL))
lhs = nonterminals_to_ids[rule.lhs]
parser_rule = cfg_rule.CFGRule(idx=rule_idx, lhs=lhs, rhs=rhs)
return parser_rule
def _populate_fn(unused_span_begin, unused_span_end, unused_parser_rule,
unused_children):
# We are only interested in the presence of a parse, not the parse itself.
# So, we use `True` to simply indicate the presence of some parse.
return True
def _postprocess_fn(nodes):
"""Merge any nodes."""
if nodes:
return [True]
else:
return []
def can_parse(target_string, rules, verbose=False):
"""Returns True if there exists >=1 parse of target_string given rules."""
tokens = target_string.split(" ")
# Add a rule for every span in target_string with lhs `ANYTHING`.
anything_rules = []
for start_idx in range(len(tokens)):
for end_idx in range(start_idx + 1, len(tokens) + 1):
rhs = " ".join(tokens[start_idx:end_idx])
anything_rules.append(TargetCfgRule(ANYTHING, rhs))
# Convert tokens to integer IDs.
terminals_to_ids = {}
for idx, token in enumerate(set(tokens)):
terminals_to_ids[token] = idx
input_ids = [terminals_to_ids[token] for token in tokens]
# Generate non-terminal IDs.
nonterminals_to_ids = {}
nt_idx = 0
for rule in rules + anything_rules:
if rule.lhs not in nonterminals_to_ids:
nonterminals_to_ids[rule.lhs] = nt_idx
nt_idx += 1
nonterminals = nonterminals_to_ids.values()
start_idx = nonterminals_to_ids[ROOT_SYMBOL]
# Convert rules.
parser_rules = []
for rule_idx, rule in enumerate(rules + anything_rules):
parser_rule = _convert_to_parser_rule(rule, terminals_to_ids,
nonterminals_to_ids, rule_idx)
if parser_rule:
parser_rules.append(parser_rule)
# Run parser.
parses = cfg_parser.parse(
input_ids,
parser_rules,
nonterminals,
start_idx,
_populate_fn,
_postprocess_fn,
verbose=verbose)
if parses:
return True
else:
return False
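# Minimal sketch, not part of the original module, showing the rule string
# format and the special ANYTHING non-terminal: can_parse automatically adds
# "ANYTHING => <span>" rules for every span of the target, so the single rule
# below should accept any target beginning with "select".
def _example_can_parse():
  rules = [TargetCfgRule.from_string("ROOT => select ##ANYTHING")]
  return can_parse("select foo from bar", rules)  # Expected: True.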
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/inference/targets/target_grammar.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate target CFG for SQL given Spider databases.
Note that for simplicity and because it has minimal impact on accuracy, the
grammar generated by this file is slightly different than the one used for the
experiments in the paper, which was specialized for each database.
"""
import json
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser.inference.targets import target_grammar
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("spider_tables", "", "Tables JSON file for Spider.")
flags.DEFINE_string("output", "", "Output rules txt file.")
RULES = [
"ROOT => ##ROOT union ##ROOT",
"ROOT => ##ROOT intersect ##ROOT",
"ROOT => ##ROOT except ##ROOT",
"ROOT => select ##EXPR",
"EXPR => ##T from ##T",
"EXPR => ##T from ##FROM",
"EXPR => ##EXPR where ##ANYTHING",
"EXPR => ##EXPR group by ##ANYTHING",
"EXPR => ##EXPR order by ##ANYTHING",
"EXPR => ##EXPR limit ##ANYTHING",
"T => ( ##ROOT )",
"T => ##T - ##T",
"T => ##T + ##T",
"T => ##T / ##T",
"T => ##T * ##T",
"T => ( ##T )",
"T => distinct ##T",
"T => distinct ( ##T )",
"T => ##T , ##T",
"T => *",
"T => ##T as ##T",
"T => t1",
"T => t2",
"T => t3",
"T => t4",
"T => t5",
"T => t6",
"T => t7",
"T => t8",
"T => t9",
"T => ##T . ##T",
"T => count ( ##T )",
"T => sum ( ##T )",
"T => avg ( ##T )",
"T => max ( ##T )",
"T => min ( ##T )",
"T => count ( ##T )",
"T => sum ( ##T )",
"T => avg ( ##T )",
"T => max ( ##T )",
"T => min ( ##T )",
"FROM => ##FROM join ##T",
"FROM => ##T join ##T",
"FROM => ##FROM on ##JOIN_COL",
"FROM => ##FROM and ##JOIN_COL",
"JOIN_COL => ##T = ##T"
]
def load_json(filepath):
with gfile.GFile(filepath, "r") as reader:
text = reader.read()
return json.loads(text)
def main(unused_argv):
tables_json = load_json(FLAGS.spider_tables)
rules = []
for rule_string in RULES:
rules.append(target_grammar.TargetCfgRule.from_string(rule_string))
schema_elements = set()
for table in tables_json:
columns = set(name for _, name in table["column_names_original"])
tables = set(table["table_names_original"])
schema_elements |= columns
schema_elements |= tables
for name in schema_elements:
rules.append(
target_grammar.TargetCfgRule.from_string("T => %s" % name.lower()))
target_grammar.rules_to_txt_file(rules, FLAGS.output)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/inference/targets/generate_spider_grammars.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for tokenization.
We use tokens to refer to the coarse (e.g. space-separated) tokenization that
is implicitly used by the QCFG rules and parser.
We use wordpieces to refer to the wordpiece-tokenized inputs for BERT.
"""
from official.nlp.bert import tokenization
# Map for special tokens.
SPECIAL_MAP = {
"m0": "[unused0]",
"m1": "[unused1]"
}
def get_tokenizer(bert_vocab_file):
tokenizer = tokenization.FullTokenizer(bert_vocab_file, do_lower_case=True)
return tokenizer
def get_wordpiece_inputs(tokens, tokenizer, verbose=False, max_num_wordpiece=80):
"""Returns inputs related to tokenization.
The resulting set of tensors includes alignment information between the
space-separated token sequence (which the QCFG parser uses) and the resulting
wordpiece sequence (which the neural encoder uses). There is always a
  one-to-many correspondence between tokens and wordpieces.
Args:
tokens: List of string tokens.
tokenizer: `tokenization.FullTokenizer` instance or equivalent.
    verbose: Print debug logging if True.
    max_num_wordpiece: Maximum number of wordpieces (currently not referenced
      inside this function).
Returns:
A tuple of (wordpiece_ids, num_wordpieces, token_start_wp_idx,
token_end_wp_idx):
wordpiece_ids: List of wordpiece ids for input sequence.
num_wordpieces: Number of wordpieces.
token_start_wp_idx: Specifies the index in wordpiece_ids for the first
wordpiece for each input token (inclusive).
token_end_wp_idx: Specifies the index in wordpiece_ids for the last
wordpiece for each input token (inclusive).
"""
wordpiece_idx = 1
token_start_wp_idx = []
token_end_wp_idx = []
wordpieces = []
for token in tokens:
token_start_wp_idx.append(wordpiece_idx)
if token in SPECIAL_MAP:
wordpieces.append(SPECIAL_MAP[token])
wordpiece_idx += 1
else:
token_wordpieces = tokenizer.tokenize(token)
wordpieces.extend(token_wordpieces)
wordpiece_idx += len(token_wordpieces)
# Inclusive end idx.
token_end_wp_idx.append(wordpiece_idx - 1)
if verbose:
print("token_start_wp_idx: %s" % token_start_wp_idx)
print("token_end_wp_idx: %s" % token_end_wp_idx)
if len(token_start_wp_idx) != len(tokens):
    # The alignment lists must cover every token, i.e. no truncation happened.
raise ValueError("Bad token alignment!")
if len(token_end_wp_idx) != len(tokens):
raise ValueError("Bad token alignment!")
wordpieces = ["[CLS]"] + wordpieces + ["[SEP]"]
wordpiece_ids = tokenizer.convert_tokens_to_ids(wordpieces)
num_wordpieces = len(wordpiece_ids)
if verbose:
print("wordpieces: %s" % wordpieces)
print("wordpiece_ids: %s" % wordpiece_ids)
return (wordpiece_ids, num_wordpieces, token_start_wp_idx, token_end_wp_idx)
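# Illustrative sketch, not part of the original module. The stand-in tokenizer
# below mimics the two methods of tokenization.FullTokenizer that
# get_wordpiece_inputs relies on, so the token-to-wordpiece alignment can be
# demonstrated without a real BERT vocab file.
class _FakeTokenizer(object):
  """Hypothetical tokenizer that splits every token into two wordpieces."""
  def tokenize(self, token):
    return [token, "##" + token]
  def convert_tokens_to_ids(self, wordpieces):
    # Assign arbitrary ids by position; a real tokenizer uses its vocab.
    return list(range(len(wordpieces)))
def _example_alignment():
  # Expected alignment: token 0 -> wordpieces [1, 2], token 1 -> [3, 4]
  # (wordpiece index 0 is reserved for [CLS]).
  return get_wordpiece_inputs(["river", "traverse"], _FakeTokenizer())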
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/data/tokenization_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used in generating tf.Examples that are used across modules."""
# Forest node types.
RULE_APPLICATION = 1
AGGREGATION = 2
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/data/data_constants.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write tf.Example protos for model training.
This requires a dataset tsv file and a set of QCFG rules as input.
"""
import os
import pdb
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.environ['BASE_DIR'] + "/baseline_replication/TMCD")
from model.parser import config_utils
from model.parser.data import example_converter
from model.parser.data import tokenization_utils
from model.qcfg import qcfg_file
from tasks import tsv_utils
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string("output", "", "Output TF example file.")
flags.DEFINE_string("bert_dir", "", "Directory for BERT, including vocab file.")
flags.DEFINE_string("config", "", "Config file.")
flags.DEFINE_string("rules", "", "Input rules file.")
flags.DEFINE_integer("offset", 0, "Start index for examples to process.")
flags.DEFINE_integer("limit", 0, "End index for examples to process if >0.")
def main(unused_argv):
config = config_utils.json_file_to_dict(FLAGS.config)
examples = tsv_utils.read_tsv(FLAGS.input)
rules = qcfg_file.read_rules(FLAGS.rules)
tokenizer = tokenization_utils.get_tokenizer(
os.path.join(FLAGS.bert_dir, "vocab.txt"))
converter = example_converter.ExampleConverter(rules, tokenizer, config)
total_written = 0
writer = tf.io.TFRecordWriter(FLAGS.output)
for idx, example in enumerate(examples):
if FLAGS.offset and idx < FLAGS.offset:
continue
if FLAGS.limit and idx >= FLAGS.limit:
break
print("Processing example %s." % idx)
try:
tf_example = converter.convert(example)
writer.write(tf_example.SerializeToString())
total_written += 1
    except Exception:
      print("Example could not be converted (e.g. it is longer than the "
            "maximum length we can process); skipping this example.")
converter.print_max_sizes()
print("Wrote %d examples." % total_written)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/data/write_examples.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for writing tf.Example files."""
import collections
import sys
import os
sys.path.append(os.environ['BASE_DIR'] + "/baseline_replication/TMCD")
from model.parser.data import forest_serialization
from model.parser.data import parsing_utils
from model.parser.data import tokenization_utils
import tensorflow as tf
import pdb
def _pad_values(values, padded_length):
  # EDIT: Added truncation for sequences longer than the maximum sequence length.
if len(values) > padded_length:
# raise ValueError("length %s is > %s" % (len(values), padded_length))
values = values[:padded_length]
for _ in range(len(values), padded_length):
values.append(0)
return values
def _create_int_feature(values, padded_length):
values = _pad_values(values, padded_length)
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def get_rule_to_idx_map(rules):
rule_to_idx_map = {}
for idx, rule in enumerate(rules):
rule_to_idx_map[rule] = idx + 1 # Reserve 0 for padding.
return rule_to_idx_map
def _get_applications(root_node, rule_to_idx_map, token_start_wp_idx,
token_end_wp_idx):
"""Returns structures for anchored applications."""
# Traverse all nodes.
node_stack = [root_node]
seen_fingerprints = set()
# Set of (span_begin, span_end, rule).
applications = set()
while node_stack:
node = node_stack.pop()
fingerprint = id(node)
if fingerprint in seen_fingerprints:
continue
seen_fingerprints.add(fingerprint)
if isinstance(node, parsing_utils.AggregationNode):
for child in node.children:
node_stack.append(child)
elif isinstance(node, parsing_utils.RuleApplicationNode):
for child in node.children:
node_stack.append(child)
applications.add((node.span_begin, node.span_end, node.rule))
else:
raise ValueError("Unexpected node type.")
# Map of (span_begin, span_end, rule) to integer idx.
application_key_to_idx_map = {}
# Lists of integers.
application_span_begin = []
application_span_end = []
application_rule_idx = []
# Sort applications to avoid non-determinism.
applications = sorted(applications)
for idx, (span_begin, span_end, rule) in enumerate(applications):
application_key_to_idx_map[(span_begin, span_end, rule)] = idx
application_span_begin.append(token_start_wp_idx[span_begin])
# token_end_wp_idx is an *inclusive* idx.
# span_end is an *exclusive* idx.
# application_span_end is an *inclusive* idx.
application_span_end.append(token_end_wp_idx[span_end - 1])
rule_idx = rule_to_idx_map[rule]
application_rule_idx.append(rule_idx)
return (application_key_to_idx_map, application_span_begin,
application_span_end, application_rule_idx)
def _convert_to_tf_example(example, tokenizer, rules, config, max_sizes=None):
"""Return tf.Example generated for input (source, target)."""
source = example[0]
target = example[1]
tokens = source.split(" ")
num_tokens = len(tokens)
# Tokenize.
(wordpiece_ids, num_wordpieces, token_start_wp_idx,
token_end_wp_idx) = tokenization_utils.get_wordpiece_inputs(
tokens, tokenizer)
# Run chart parser.
target_node = parsing_utils.get_target_node(source, target, rules)
if not target_node:
raise ValueError("No parse returned for target for example: (%s, %s)" %
(source, target))
merged_node = parsing_utils.get_merged_node(source, rules)
# Get anchored applications.
rule_to_idx_map = get_rule_to_idx_map(rules)
(application_key_to_idx_map, application_span_begin, application_span_end,
application_rule_idx) = _get_applications(merged_node, rule_to_idx_map,
token_start_wp_idx,
token_end_wp_idx)
num_applications = len(application_span_begin)
  # Raise an error if any rule application extends beyond the maximum number of
  # wordpieces that the model can encode.
  if max(application_span_end) >= config["max_num_wordpieces"]:
    raise ValueError("Rule application ends at %s >= %s" %
                     (max(application_span_end), config["max_num_wordpieces"]))
def application_idx_fn(span_begin, span_end, rule):
return application_key_to_idx_map[(span_begin, span_end, rule)]
# Get numerator forest.
(nu_node_type, nu_node_1_idx, nu_node_2_idx, nu_application_idx,
nu_num_nodes) = forest_serialization.get_forest_lists(
target_node, num_tokens, application_idx_fn)
# Get denominator forest.
(de_node_type, de_node_1_idx, de_node_2_idx, de_application_idx,
de_num_nodes) = forest_serialization.get_forest_lists(
merged_node, num_tokens, application_idx_fn)
# Create features dict.
features = collections.OrderedDict()
features["wordpiece_ids"] = _create_int_feature(wordpiece_ids,
config["max_num_wordpieces"])
features["num_wordpieces"] = _create_int_feature([num_wordpieces], 1)
features["application_span_begin"] = _create_int_feature(
application_span_begin, config["max_num_applications"])
features["application_span_end"] = _create_int_feature(
application_span_end, config["max_num_applications"])
features["application_rule_idx"] = _create_int_feature(
application_rule_idx, config["max_num_applications"])
features["nu_node_type"] = _create_int_feature(
nu_node_type, config["max_num_numerator_nodes"])
features["nu_node_1_idx"] = _create_int_feature(
nu_node_1_idx, config["max_num_numerator_nodes"])
features["nu_node_2_idx"] = _create_int_feature(
nu_node_2_idx, config["max_num_numerator_nodes"])
features["nu_application_idx"] = _create_int_feature(
nu_application_idx, config["max_num_numerator_nodes"])
features["nu_num_nodes"] = _create_int_feature([nu_num_nodes], 1)
features["de_node_type"] = _create_int_feature(
de_node_type, config["max_num_denominator_nodes"])
features["de_node_1_idx"] = _create_int_feature(
de_node_1_idx, config["max_num_denominator_nodes"])
features["de_node_2_idx"] = _create_int_feature(
de_node_2_idx, config["max_num_denominator_nodes"])
features["de_application_idx"] = _create_int_feature(
de_application_idx, config["max_num_denominator_nodes"])
features["de_num_nodes"] = _create_int_feature([de_num_nodes], 1)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
# Update max sizes.
if max_sizes is not None:
max_sizes["num_wordpieces"] = max(max_sizes["num_wordpieces"],
num_wordpieces)
max_sizes["num_applications"] = max(max_sizes["num_applications"],
num_applications)
max_sizes["nu_num_nodes"] = max(max_sizes["nu_num_nodes"], nu_num_nodes)
max_sizes["de_num_nodes"] = max(max_sizes["de_num_nodes"], de_num_nodes)
return tf_example
class ExampleConverter(object):
"""Converts inputs to tf.Example protos."""
def __init__(self, rules, tokenizer, config):
self.rules = rules
self.tokenizer = tokenizer
self.config = config
self.max_sizes = collections.defaultdict(int)
def convert(self, example):
"""Return tf.Example or Raise."""
tf_example = _convert_to_tf_example(example, self.tokenizer, self.rules,
self.config, self.max_sizes)
return tf_example
def print_max_sizes(self):
"""Print max sizes which is useful for determining necessary padding."""
print("max_sizes: %s" % self.max_sizes)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/data/example_converter.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating input tensors for parse forests.
The output of the QCFG parser used for pre-processing is a forest
representation of a set of parses. This representation factors common sub-trees
to represent exponentially many trees in an efficient manner.
In our TensorFlow graph, we want to sum over scores for the given set of parse
trees, using dynamic programming over the forest representation for efficiency.
Therefore, this module serializes the forest into a set of integer lists that
collectively represent a sequence of nodes, with child nodes always preceding
their parents. We create new nodes as necessary so that no node has more than
2 children.
"""
import collections
import sys
import os
sys.path.append(os.environ['BASE_DIR'] + "baseline_replication/TMCD")
from model.parser.data import data_constants
from model.parser.data import parsing_utils
def _get_node_fingerprint(node):
return id(node)
def _get_span_to_nodes_maps(root_node):
"""Return maps of span indexes to nodes."""
node_stack = [root_node]
seen_fingerprints = set()
span_to_production_nodes = collections.defaultdict(list)
span_to_aggregation_nodes = collections.defaultdict(list)
while node_stack:
node = node_stack.pop()
fingerprint = _get_node_fingerprint(node)
if fingerprint in seen_fingerprints:
continue
seen_fingerprints.add(fingerprint)
if isinstance(node, parsing_utils.AggregationNode):
for child in node.children:
node_stack.append(child)
span_to_aggregation_nodes[(node.span_begin, node.span_end)].append(node)
elif isinstance(node, parsing_utils.RuleApplicationNode):
for child in node.children:
node_stack.append(child)
span_to_production_nodes[(node.span_begin, node.span_end)].append(node)
else:
raise ValueError("Unexpected node type.")
return span_to_production_nodes, span_to_aggregation_nodes
def get_forest_lists(root_node, num_tokens, application_idx_fn):
"""Get integer lists for serialized forest.
Args:
root_node: Root parsing_utils.ForestNode for parse forest.
num_tokens: Number of tokens in input.
application_idx_fn: Takes (span_begin, span_end, rule) and returns an idx.
Returns:
A tuple (node_type_list, node_1_idx_list, node_2_idx_list,
application_idx_list, num_nodes). All of these are lists of integers
with length equal to the number of nodes in the forest, except for num_nodes
which is the integer number of nodes in the forest. The lists include
the following information:
node_type_list: Whether each node is of type AGGREGATION or RULE_APPLICATION.
node_1_idx_list: If node has >= 1 children, this is the index of its
first child. A node index refers to its index in these lists.
If node has no children, will be -1.
node_2_idx_list: If node has 2 children, this is the index of its
second child, otherwise will be -1.
application_idx_list: If node is of type RULE_APPLICATION, this is
the index of the anchored rule application, where indexing is
defined by application_idx_fn.
"""
(span_to_production_nodes,
span_to_aggregation_nodes) = _get_span_to_nodes_maps(root_node)
# Setup empty lists.
node_type_list = []
node_1_idx_list = []
node_2_idx_list = []
application_idx_list = []
# Map of fingerprints to index.
fingerprint_to_idx = {}
current_index = 0
# Iterate through chart.
for span_end in range(1, num_tokens + 1):
for span_begin in range(span_end - 1, -1, -1):
if (span_begin, span_end) in span_to_production_nodes:
for node in span_to_production_nodes[(span_begin, span_end)]:
fingerprint = _get_node_fingerprint(node)
fingerprint_to_idx[fingerprint] = current_index
current_index += 1
if not isinstance(node, parsing_utils.RuleApplicationNode):
raise ValueError
node_type_list.append(data_constants.RULE_APPLICATION)
if not node.children:
node_1_idx_list.append(-1)
node_2_idx_list.append(-1)
elif len(node.children) == 1:
node_1_idx_list.append(fingerprint_to_idx[_get_node_fingerprint(
node.children[0])])
node_2_idx_list.append(-1)
elif len(node.children) == 2:
node_1_idx_list.append(fingerprint_to_idx[_get_node_fingerprint(
node.children[0])])
node_2_idx_list.append(fingerprint_to_idx[_get_node_fingerprint(
node.children[1])])
else:
raise ValueError
application_idx_list.append(
application_idx_fn(node.span_begin, node.span_end, node.rule))
for node in span_to_aggregation_nodes[(span_begin, span_end)]:
if not isinstance(node, parsing_utils.AggregationNode):
raise ValueError
node_type_list.append(data_constants.AGGREGATION)
application_idx_list.append(-1)
# Compute sum of first 2 nodes.
node_1_fingerprint = _get_node_fingerprint(node.children[0])
node_1_idx = fingerprint_to_idx[node_1_fingerprint]
node_1_idx_list.append(node_1_idx)
node_2_fingerprint = _get_node_fingerprint(node.children[1])
node_2_idx = fingerprint_to_idx[node_2_fingerprint]
node_2_idx_list.append(node_2_idx)
current_index += 1
# Sum the remaining.
for idx in range(2, len(node.children)):
node_type_list.append(data_constants.AGGREGATION)
application_idx_list.append(-1)
node_1_idx_list.append(current_index - 1)
node_2_idx = fingerprint_to_idx[_get_node_fingerprint(
node.children[idx])]
node_2_idx_list.append(node_2_idx)
current_index += 1
# Point to last node for index.
fingerprint = _get_node_fingerprint(node)
fingerprint_to_idx[fingerprint] = current_index - 1
num_nodes = current_index
return (node_type_list, node_1_idx_list, node_2_idx_list,
application_idx_list, num_nodes)
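# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It serializes a minimal forest consisting of a single leaf rule
# application covering one token, with a trivial application_idx_fn. The
# __main__ guard keeps normal imports of this module unaffected.
if __name__ == "__main__":
  _leaf = parsing_utils.RuleApplicationNode(
      rule=None, children=[], span_begin=0, span_end=1, target_string="foo")
  _lists = get_forest_lists(
      _leaf, num_tokens=1,
      application_idx_fn=lambda span_begin, span_end, rule: 0)
  # Expect a single RULE_APPLICATION node with no children and application idx 0.
  print(_lists)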
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/data/forest_serialization.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating parse forests for model training."""
import collections
import sys
import os
sys.path.append(os.environ['BASE_DIR'] + "baseline_replication/TMCD")
from model.qcfg import qcfg_parser
from model.qcfg import qcfg_rule
class ForestNode(object):
"""Parent class representing a node in parse forest."""
def __init__(self, span_begin, span_end, target_string, rule, children):
self.span_begin = span_begin
self.span_end = span_end
self.target_string = target_string
self.rule = rule # Can be None for AggregationNode.
self.children = children # List of ForestNode.
def __str__(self):
node_type = self.__class__.__name__
return "%s (%s, %s): %s, %s" % (node_type, self.span_begin,
self.span_end,
self.target_string,
self.rule)
def __repr__(self):
return self.__str__()
class AggregationNode(ForestNode):
"""Represents an aggregation over multiple nodes."""
def __init__(self, children):
target_string = children[0].target_string
span_begin = children[0].span_begin
span_end = children[0].span_end
# All nodes should have the same span and target_string.
for node in children:
if ((node.target_string, node.span_begin, node.span_end) !=
(target_string, span_begin, span_end)):
raise ValueError("Cannot aggreagate different spans or targets: %s" %
children)
super(AggregationNode, self).__init__(span_begin, span_end, target_string,
None, children)
class RuleApplicationNode(ForestNode):
"""Represents an anchored rule application."""
def __init__(self, rule, children, span_begin, span_end, target_string):
super(RuleApplicationNode, self).__init__(span_begin, span_end,
target_string, rule, children)
def _fingerprint(node):
return node.target_string
def _aggregate(nodes):
"""Returns list of nodes aggregated by target string."""
fingerprints_to_nodes = collections.OrderedDict()
aggregated_nodes = []
for node in nodes:
fingerprint = _fingerprint(node)
if fingerprint not in fingerprints_to_nodes:
fingerprints_to_nodes[fingerprint] = []
fingerprints_to_nodes[fingerprint].append(node)
for _, nodes in fingerprints_to_nodes.items():
if len(nodes) > 1:
aggregated_node = AggregationNode(nodes)
aggregated_nodes.append(aggregated_node)
else:
aggregated_nodes.append(nodes[0])
return aggregated_nodes
def filter_nodes(nodes, target_string):
new_nodes = []
for node in nodes:
if node.target_string not in target_string:
continue
new_nodes.append(node)
return new_nodes
def get_target_node(source, target, rules):
"""Return node corresponding to parses for target, or None."""
tokens = source.split(" ")
def node_fn(span_begin, span_end, rule, children):
target_string = qcfg_rule.apply_target(
rule, [node.target_string for node in children])
return RuleApplicationNode(rule, children, span_begin, span_end,
target_string)
def postprocess_fn(nodes):
nodes = filter_nodes(nodes, target)
return _aggregate(nodes)
nodes = qcfg_parser.parse(
tokens, rules, node_fn=node_fn, postprocess_cell_fn=postprocess_fn)
# Filter for nodes where target_string matches target exactly.
ret_nodes = []
for node in nodes:
if node.target_string == target:
ret_nodes.append(node)
if not ret_nodes:
return None
if len(ret_nodes) > 1:
raise ValueError
return ret_nodes[0]
def get_merged_node(source, rules):
"""Return node corresponding to all parses."""
tokens = source.split(" ")
def node_fn(span_begin, span_end, rule, children):
# Target string is ignored for this case.
target_string = None
return RuleApplicationNode(rule, children, span_begin, span_end,
target_string)
def postprocess_fn(nodes):
if len(nodes) > 1:
return [AggregationNode(nodes)]
else:
return nodes
nodes = qcfg_parser.parse(
tokens, rules, node_fn=node_fn, postprocess_cell_fn=postprocess_fn)
if len(nodes) != 1:
raise ValueError("example `%s` len(nodes) != 1: %s" % (source, nodes))
return nodes[0]
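# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Uses the toy grammar from the parser tests to build the target
# forest for a single example.
if __name__ == "__main__":
  _rules = [
      qcfg_rule.rule_from_string("dax ### DAX"),
      qcfg_rule.rule_from_string("NT_1 twice ### NT_1 NT_1"),
  ]
  _root = get_target_node("dax twice", "DAX DAX", _rules)
  print(_root)  # RuleApplicationNode spanning both tokens with target "DAX DAX".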
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/data/parsing_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data structures for representing Quasi-Synchronous CFG (QCFG) rules.
Currently, both terminal and non-terminal symbols are simply represented
as strings, with special strings reserved for non-terminals.
QCFG rules used by NQG are subject to the following restrictions:
- There is only one non-terminal symbol, `NT`
- The only allowed non-terminal indexes are 1 and 2.
Therefore, we only need to reserve two strings to represent indexed
non-terminals.
We also expect all rules to be normalized as follows: a non-terminal with index
2 should never appear before a non-terminal with index 1 in the source
sequence.
Note that this data structure could potentially be improved:
1. A more flexible representation for terminal and non-terminal symbols
would avoid possible collisions between terminal and non-terminal symbols,
and allow for representing QCFGs that do not conform to the restrictions above.
2. Representing symbols as integers rather than strings may have computational
benefits for various operations over QCFG rules.
"""
import collections
# Represents the non-terminal symbol `NT` with linked index 1.
NT_1 = "NT_1"
# Represents the non-terminal symbol `NT` with linked index 2.
NT_2 = "NT_2"
# All other strings are assumed to represent terminal symbols.
# The LHS non-terminal is always assumed to be `NT` so is not represented.
QCFGRuleParent = collections.namedtuple(
"QCFGRuleParent",
[
"source", # Tuple of source symbols (strings).
"target", # Tuple of target symbols (strings).
"arity", # The number of unique non-terminal indexes (0, 1, or 2).
])
# Used for separating source and target sequences for string formatting.
SEPARATOR = "###"
# Define sub-class to override __str__ and __repr__ for easier debugging.
class QCFGRule(QCFGRuleParent):
def __str__(self):
return "%s %s %s" % (" ".join(self.source), SEPARATOR, " ".join(
self.target))
def __repr__(self):
return str(self)
def _get_arity(source):
if NT_1 in source and NT_2 in source:
return 2
if NT_1 in source:
return 1
if NT_2 in source:
raise ValueError("Source is unnormalized: %s" % source)
return 0
def rule_from_string(rule_str):
"""Parse rule in format 'source SEPARATOR target'."""
splits = rule_str.split(SEPARATOR)
if len(splits) != 2:
raise ValueError("Invalid rule string: %s" % rule_str)
source_str, target_str = splits
source = source_str.strip().split()
target = target_str.strip().split()
arity = _get_arity(source)
return QCFGRule(tuple(source), tuple(target), arity)
def apply_target(rule, substitutions):
"""Return target string with non-terminals replaced with substitutions."""
if rule.arity != len(substitutions):
raise ValueError
output = []
for token in rule.target:
if token == NT_1:
output.append(substitutions[0])
elif token == NT_2:
output.append(substitutions[1])
else:
output.append(token)
return " ".join(output)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/qcfg/qcfg_rule.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read and write QCFG grammars to/from human readable txt files."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.qcfg import qcfg_rule
from tensorflow.io import gfile
def read_rules(filename):
"""Read rule txt file to list of rules."""
rules = []
with gfile.GFile(filename, "r") as txt_file:
for line in txt_file:
line = line.rstrip()
rule = qcfg_rule.rule_from_string(line)
rules.append(rule)
print("Loaded %s rules from %s." % (len(rules), filename))
return rules
def write_rules(rules, filename):
"""Write rules to txt file."""
with gfile.GFile(filename, "w") as txt_file:
for rule in rules:
line = "%s\n" % str(rule)
txt_file.write(line)
print("Wrote %s rules to %s." % (len(rules), filename))
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/qcfg/qcfg_file.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for QCFG parsing by extending a general CFG parser."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from common.cky import cfg_parser
from common.cky import cfg_rule
from model.qcfg import qcfg_rule
def _convert_rhs(rule, nt_idx, tokens_to_input_ids):
"""Convert rule to `rhs` argument for CFGRule."""
rhs = []
for token in rule.source:
if token == qcfg_rule.NT_1:
rhs.append(cfg_rule.CFGSymbol(idx=nt_idx, type=cfg_rule.NON_TERMINAL))
elif token == qcfg_rule.NT_2:
rhs.append(cfg_rule.CFGSymbol(idx=nt_idx, type=cfg_rule.NON_TERMINAL))
else:
if token not in tokens_to_input_ids:
# Rule contains tokens not in the input so can be ignored for parsing.
return None
else:
token_id = tokens_to_input_ids[token]
rhs.append(cfg_rule.CFGSymbol(idx=token_id, type=cfg_rule.TERMINAL))
return tuple(rhs)
def parse(tokens, rules, node_fn, postprocess_cell_fn, verbose=False):
"""Run bottom up parser.
Args:
tokens: List of strings for input.
rules: List of QCFGRule instances.
node_fn: Function with input arguments (span_begin, span_end, rule,
children) and returns a "node".
postprocess_cell_fn: Function from a list of "nodes" to "nodes".
verbose: Print debug output if True.
Returns:
A List of "node" objects for completed parses.
"""
if verbose:
print("tokens: %s" % (tokens,))
print("rules:")
for rule in rules:
print(str(rule))
# Convert tokens to integer IDs.
tokens_to_input_ids = {}
input_ids_to_tokens = {}
for idx, token in enumerate(set(tokens)):
input_ids_to_tokens[idx] = token
tokens_to_input_ids[token] = idx
input_ids = [tokens_to_input_ids[token] for token in tokens]
# Our QCFG grammars always use a single NT symbol.
nt_idx = 0
# Convert to ParserRule format.
idx_to_rule = {}
parser_rules = []
rule_idx = 0
for rule in rules:
rhs = _convert_rhs(rule, nt_idx, tokens_to_input_ids)
if rhs is None:
continue
parser_rule = cfg_rule.CFGRule(idx=rule_idx, lhs=nt_idx, rhs=rhs)
parser_rules.append(parser_rule)
idx_to_rule[rule_idx] = rule
rule_idx += 1
# Wrap node_fn to pass original Rule instead of CFGRule.
def populate_fn(span_begin, span_end, parser_rule, children):
rule = idx_to_rule[parser_rule.idx]
return node_fn(span_begin, span_end, rule, children)
nonterminals = {nt_idx}
start_idx = nt_idx
if verbose:
print("parser_rules: %s" % parser_rules)
parses = cfg_parser.parse(
input_ids,
parser_rules,
nonterminals,
start_idx,
populate_fn,
postprocess_cell_fn,
verbose=verbose)
return parses
def can_parse(source, target, rules, verbose=False):
"""Return True if source and target can be derived given rules using parser.
Args:
source: Source string (cannot contain non-terminals).
target: Target string (cannot contain non-terminals).
rules: List of QCFGRule instances.
verbose: Print debug output if True.
Returns:
True if source and target can be derived.
"""
def node_fn(unused_span_begin, unused_span_end, rule, children):
"""Represent nodes as target strings."""
return qcfg_rule.apply_target(rule, children)
def postprocess_cell_fn(nodes):
"""Filter and merge generated nodes."""
new_nodes = []
for node in nodes:
# Discard targets that are not substrings of the gold target.
if node in target:
new_nodes.append(node)
return list(set(new_nodes))
tokens = source.split(" ")
outputs = parse(
tokens,
rules,
verbose=verbose,
node_fn=node_fn,
postprocess_cell_fn=postprocess_cell_fn)
if outputs and target in outputs:
return True
else:
return False
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/qcfg/qcfg_parser.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for qcfg_parser."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.qcfg import qcfg_parser
from model.qcfg import qcfg_rule
import tensorflow as tf
def _node_fn(unused_span_begin, unused_span_end, rule, children):
"""Nodes will represent target strings."""
return qcfg_rule.apply_target(rule, children)
def _postprocess_cell_fn(nodes):
return nodes
class QcfgParserTest(tf.test.TestCase):
def test_parse(self):
tokens = ["dax", "twice"]
rules = [
qcfg_rule.rule_from_string("dax ### DAX"),
qcfg_rule.rule_from_string("NT_1 twice ### NT_1 NT_1"),
]
parses = qcfg_parser.parse(tokens, rules, _node_fn, _postprocess_cell_fn)
self.assertEqual(parses, ["DAX DAX"])
def test_parse_flat(self):
tokens = ["dax", "twice"]
rules = [
qcfg_rule.rule_from_string("dax twice ### DAX TWICE"),
]
parses = qcfg_parser.parse(tokens, rules, _node_fn, _postprocess_cell_fn)
self.assertEqual(parses, ["DAX TWICE"])
def test_can_parse(self):
rules = [
qcfg_rule.rule_from_string("dax ### DAX"),
qcfg_rule.rule_from_string("NT_1 twice ### NT_1 NT_1"),
]
can_parse = qcfg_parser.can_parse(
source="dax twice", target="DAX DAX", rules=rules)
self.assertTrue(can_parse)
if __name__ == "__main__":
tf.test.main()
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/qcfg/qcfg_parser_test.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute % of examples in a dataset that can be derived by a given QCFG."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.qcfg import qcfg_file
from model.qcfg import qcfg_parser
from tasks import tsv_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_integer("limit", 100, "End processing at this example index.")
flags.DEFINE_integer("offset", 0, "Start processing at this example index.")
flags.DEFINE_string("rules", "", "Grammar rules txt file.")
def main(unused_argv):
examples = tsv_utils.read_tsv(FLAGS.input)
rules = qcfg_file.read_rules(FLAGS.rules)
print("Rules: %s" % rules)
num_examples = 0
num_covered = 0
for idx, example in enumerate(examples):
if FLAGS.offset and idx < FLAGS.offset:
continue
if FLAGS.limit and idx >= FLAGS.limit:
break
print("Processing example %s." % idx)
print("Source: %s" % example[0])
print("Target: %s" % example[1])
source = example[0]
gold_target = example[1]
can_parse = qcfg_parser.can_parse(source, gold_target, rules, verbose=False)
num_examples += 1
if can_parse:
num_covered += 1
else:
print("Output set does not contain gold target.")
print("%s covered out of %s" % (num_covered, num_examples))
if __name__ == "__main__":
app.run(main)
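# Example invocation (file paths below are illustrative, not from the original
# repository):
#   python compute_recall.py --input=data/train.tsv \
#     --rules=rules/induced_rules.txt --offset=0 --limit=100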
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/qcfg/compute_recall.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for grammar induction."""
import collections
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.induction import codelength_utils
from model.induction import derivation_utils
from model.induction import exact_match_utils
from model.induction import rule_utils
from model.induction import split_utils
from model.qcfg import qcfg_parser
from model.qcfg import qcfg_rule
InductionConfig = collections.namedtuple("InductionConfig", [
"sample_size",
"max_iterations",
"min_delta",
"terminal_codelength",
"non_terminal_codelength",
"parse_sample",
"allow_repeated_target_nts",
"seed_exact_match",
"balance_parens",
])
# We track the state of search during rule induction in the following tuple.
# Note that our implementation relies on two important aspects:
# 1. We can quickly determine if any substitution can potentially exist such
# that a given rule can be used to derive a given string pair, based only
# on terminal symbol overlap.
# 2. The set of derivable string pairs in our induced grammar is monotonically
# increasing, based on our criteria for adding and removing rules.
SearchState = collections.namedtuple(
"SearchState",
[
"current_rules", # Set of rules in induced grammar.
"rules_to_candidates", # Dictionary of rules to candidates.
"derivable_rules", # Set of derivable rules.
])
def _find_affected_rules(rules, new_rule):
# TODO(petershaw): This can potentially be made more efficient by
# pre-indexing rules in a data structure such as a Trie.
found_rules = []
for rule in rules:
if (rule_utils.rhs_can_maybe_derive(new_rule.source, rule.source) and
rule_utils.rhs_can_maybe_derive(new_rule.target, rule.target)):
found_rules.append(rule)
return found_rules
def _has_balanced_parens(rhs):
"""Returns True if all '(' precede and are followed by a correspoding ')'."""
open_parens = 0
for token in rhs:
for char in token:
if char == "(":
open_parens += 1
elif char == ")":
open_parens -= 1
if open_parens < 0:
return False
return open_parens == 0
def _is_balanced_paren_candidate(rule):
if not _has_balanced_parens(rule.source):
return False
if not _has_balanced_parens(rule.target):
return False
return True
def _filter_unbalanced_paren_candidates(rules):
new_rules = set()
for rule in rules:
if _is_balanced_paren_candidate(rule):
new_rules.add(rule)
return new_rules
def _get_max_rule(search_state, config, examples):
"""Identify a rule to add that maximizes the decrease in codelength."""
# Dict of rule candidates to the codelength savings
# (i.e. negative codelength delta).
candidates_to_delta = {}
# Map of rule candidates to the set of rules that they enable removing.
# (inverse of rules_to_candidates).
candidates_to_rules = collections.defaultdict(set)
for rule in search_state.current_rules:
candidates = search_state.rules_to_candidates[rule]
for candidate in candidates:
if candidate not in candidates_to_delta:
# Subtract cost of new rule if not already accounted for.
candidates_to_delta[candidate] = -codelength_utils.rule_codelength(
candidate, config)
# Add cost of every possible removed rule.
candidates_to_delta[candidate] += codelength_utils.rule_codelength(
rule, config)
candidates_to_rules[candidate].add(rule)
# Sort candidates by codelength reduction (prior to computing the codelength
# delta of the dataset encoding, which is relatively more expensive).
# Use lexical ordering to break ties.
candidates_to_delta_sorted = sorted(
candidates_to_delta.items(), key=lambda x: (-x[1], x[0]))
# For debugging, print up to the top 15 candidates.
print("Candidate rules:")
for rule, delta in candidates_to_delta_sorted[:15]:
print("%s (%s)" % (rule, delta))
min_delta = config.min_delta
max_rule_to_add = None
max_rules_to_remove = None
for rule, delta in candidates_to_delta_sorted:
if delta <= min_delta:
break
rules_to_remove = candidates_to_rules[rule]
targets_delta = codelength_utils.get_dataset_encoding_delta(
sample_size=config.parse_sample,
examples=examples,
current_rules=search_state.current_rules,
candidate_rule_to_add=rule,
candidate_rules_to_remove=rules_to_remove)
print("Targets encoding delta for %s: -%s" % (rule, targets_delta))
# Compute the full delta including both the codelength reduction of encoding
# the grammar (previously computed) and the codelength delta of encoding
# the targets with the new grammar.
delta -= targets_delta
if delta > min_delta:
min_delta = delta
max_rule_to_add = rule
max_rules_to_remove = rules_to_remove
return max_rule_to_add, max_rules_to_remove
def _update_state(affected_rules, search_state, config):
"""Sparsely update the state for rules that may be affected."""
for idx, affected_rule in enumerate(affected_rules):
# Debug logging every Nth rule.
if idx % 10 == 0:
print("Updating rule %s of %s." % (idx + 1, len(affected_rules)))
# Check if rule can now be generated. Ideally, this should have been
# determined upstream when determining which rules could be removed,
# but some cases are not caught until here, such as when source
# sequences contain repeated substrings and are therefore not considered
# by `get_candidates`.
# Regardless, it is still important to run this for the side-effect of
# updating the set of derivable rules.
if derivation_utils.can_derive(affected_rule, search_state.current_rules,
search_state.derivable_rules):
print("Can now generate: %s." % str(affected_rule))
search_state.current_rules.remove(affected_rule)
else:
candidates = split_utils.find_possible_splits(
affected_rule,
search_state.derivable_rules,
allow_repeated_target_nts=config.allow_repeated_target_nts,
)
if config.balance_parens:
candidates = _filter_unbalanced_paren_candidates(candidates)
for candidate in candidates:
search_state.rules_to_candidates[affected_rule].add(candidate)
print("Updates complete.")
def _induce_rules_for_examples(examples, seed_rules, config):
"""Iteratively searches for rules to optimize codelength objective."""
# Initialize the search state.
search_state = SearchState(
current_rules=seed_rules,
rules_to_candidates=collections.defaultdict(set),
derivable_rules=seed_rules.copy())
# Update state for all seed rules.
_update_state(seed_rules, search_state, config)
# Iteratively update grammar.
for iteration_num in range(config.max_iterations):
print("Iteration %s." % iteration_num)
rule, rules_to_remove = _get_max_rule(search_state, config, examples)
# Break if there is no candidate that improves codelength objective.
if rule is None:
print("Breaking as no candidate exceeds minimum threshold.")
break
# Otherwise, update the set of rules.
print("Adding rule: %s" % str(rule))
search_state.current_rules.add(rule)
search_state.derivable_rules.add(rule)
for rule_to_remove in rules_to_remove:
print("Removing rule: %s" % str(rule_to_remove))
search_state.current_rules.remove(rule_to_remove)
del search_state.rules_to_candidates[rule_to_remove]
print("Number of current_rules: %s" % len(search_state.current_rules))
# Update the search state based on any potentially affected rules.
# The set of affected rules includes any rule that the added rule
# may potentially be used in a derivation for.
affected_rules = _find_affected_rules(search_state.current_rules, rule)
_update_state(affected_rules, search_state, config)
# Return the set of induced rules.
return search_state.current_rules
def _example_to_rule(source_str, target_str):
"""Convert (source, target) example to a QCFGRule."""
return qcfg_rule.QCFGRule(
tuple(source_str.split()), tuple(target_str.split()), arity=0)
def _get_rules_for_other_examples(induced_rules, other_examples):
"""Add rules for examples outside of sample that cannot be derived."""
new_rules = set()
for source_str, target_str in other_examples:
goal_rule = qcfg_rule.QCFGRule(
tuple(source_str.split()), tuple(target_str.split()), arity=0)
if not derivation_utils.can_derive(goal_rule, induced_rules, None):
new_rules.add(goal_rule)
print("Added %s rules for examples outside of initial sample." %
len(new_rules))
return new_rules
def _split_examples(examples, config):
"""Split examples into a sampled and a remaining subset based on config."""
# Only consider unique examples.
# TODO(petershaw): Consider preserving the number of occurrences for each
# unique example to better weight sampling for computing the dataset encoding
# codelength.
examples = list(set([tuple(example) for example in examples]))
if config.sample_size:
# Sort by number of input tokens.
examples_sorted = sorted(examples, key=lambda x: len(x[0].split()))
examples_sample = examples_sorted[:config.sample_size]
examples_other = examples_sorted[config.sample_size:]
else:
examples_sample = examples
examples_other = []
return examples_sample, examples_other
def induce_rules(examples, config):
"""Return set of induced rules for a given set of examples."""
# For efficiency, we only run grammar induction on a subset of examples based
# on the sample size specified in the config.
print("Started to induce rules")
examples_sample, examples_other = _split_examples(examples, config)
print("Started to initialize examples")
# Initialize with a rule for each example.
seed_rules = set()
for source_str, target_str in examples_sample:
seed_rules.add(_example_to_rule(source_str, target_str))
print("Added %s seed rules for examples." % len(seed_rules))
# Optionally add exact match rules.
if config.seed_exact_match:
seed_rules |= exact_match_utils.get_exact_match_rules(examples_sample)
print("Seed rules after adding exact match rules for sampled examples: %s." %
len(seed_rules))
# Iteratively induce rules over the sampled set of examples.
induced_rules = _induce_rules_for_examples(examples_sample, seed_rules,
config)
print("Induced %s rules from sample of %s examples." %
(len(induced_rules), len(examples_sample)))
# Verify that induced grammar can derive all examples in examples_sample.
# We use the QCFG parser rather than `derivation_utils` as it is typically
# faster when we do not need to consider non-terminals in the goal strings,
# and to verify consistency of the algorithms.
for source_str, target_str in examples_sample:
if not qcfg_parser.can_parse(source_str, target_str, induced_rules):
raise ValueError("Induced rules cannot parse example: (%s, %s)" %
(source_str, target_str))
print("Checking %s remaining examples." % len(examples_other))
# Add rules for any examples that were not in the original sample and cannot
# be derived by the induced set of rules.
if examples_other:
if config.seed_exact_match:
induced_rules |= exact_match_utils.get_exact_match_rules(examples_other)
print("Rules after exact match for remaining examples: %s" %
len(induced_rules))
for source_str, target_str in examples_other:
if not qcfg_parser.can_parse(source_str, target_str, induced_rules):
induced_rules.add(_example_to_rule(source_str, target_str))
print("Rules after adding rules for unparsable remaining examples: %s" %
len(induced_rules))
return induced_rules
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/induction_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for identifying candidate rules."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.induction import rule_utils
from model.qcfg import qcfg_rule
# Non-terminal with temporary index that is guaranteed to be unused in the
# current rule. This should be replaced with NT_1 or NT_2 to form a valid
# QCFGRule.
NT_TMP = "NT_?"
def _get_non_terminals(rhs):
"""Return set of non-terminal symbols in `rhs`."""
non_terminals = set()
for symbol in rhs:
if symbol in (qcfg_rule.NT_1, qcfg_rule.NT_2, NT_TMP):
non_terminals.add(symbol)
return non_terminals
def _get_tmp_nt_replacement(nts):
if nts == {NT_TMP}:
return qcfg_rule.NT_1
elif nts == {NT_TMP, qcfg_rule.NT_1}:
return qcfg_rule.NT_2
elif nts == {NT_TMP, qcfg_rule.NT_2}:
return qcfg_rule.NT_1
else:
raise ValueError("Unexpected NTs: %s" % nts)
def _replace_tmp_nt(source, target, nts):
new_nt = _get_tmp_nt_replacement(nts)
source = rule_utils.rhs_replace(source, [NT_TMP], new_nt)
target = rule_utils.rhs_replace(target, [NT_TMP], new_nt)
return source, target
def _make_rule(nts, source, target):
"""Canoncalize NT indexes and return QCFGRule."""
arity = len(nts)
source, target = rule_utils.canonicalize_nts(source, target, arity)
return qcfg_rule.QCFGRule(tuple(source), tuple(target), arity)
def _maybe_get_candidate_pair(source_g, source_h, target_g, target_h):
"""Returns candidate rule pair if proposed sources and targets are valid."""
# Check that proposed sources and targets contain same non-terminal indexes.
nts_g = _get_non_terminals(source_g)
if nts_g != _get_non_terminals(target_g):
return None
nts_h = _get_non_terminals(source_h)
if nts_h != _get_non_terminals(target_h):
return None
# Canonicalize non-terminal index ordering and return candidate pair.
source_g, target_g = _replace_tmp_nt(source_g, target_g, nts_g)
rule_g = _make_rule(nts_g, source_g, target_g)
rule_h = _make_rule(nts_h, source_h, target_h)
return (rule_g, rule_h)
def _get_split_candidates(rule, allow_repeated_target_nts=True):
"""Implements `SPLIT` procedure described in paper appendix.
To explain this function, let us review some notation for SCFGs/QCFGs.
Let `g` and `h` refer to QCFG rules. Let `=>_g` denote the application of
rule g, such that <a,b> `=>_g` <c,d> means
that <c,d> can be generated from <a,b> by applying the rule `g` to replace
some indexed non-terminal in <a,b>. Let
`=>_g =>_h` refer to a chain of rule applications of `g` and `h`, omitting
the intermediate rule pair.
We can now define the behavior of this function. Let `NT -> <a,b>` refer to
the input argument `rule`. The function returns the following set:
{(g,h) | <NT,NT> =>_g =>_h <a,b>}
In other words, we return pairs of rules that can generate the input `rule`.
We leave it to the caller to also consider the rule pair (h,g).
Certain restrictions also apply to the rule pairs that will be considered.
For example, if `rule` is:
NT -> <foo bar, bar foo>
Then the return set will include the following rule pair:
NT -> <NT_1 bar, bar NT_1>
NT -> <foo, foo>
Args:
rule: A QcfgRule.
allow_repeated_target_nts: Whether to allow repeated substrings to be
replaced with multiple non-terminals sharing the same index in target
sequences.
Returns:
List of rule pairs.
"""
candidate_pairs = []
# Consider all pairs of subspans in source and target to replace with
# a new non-terminal symbol.
for source_nt_start in range(len(rule.source)):
for source_nt_end in range(source_nt_start + 1, len(rule.source) + 1):
source_h = rule.source[source_nt_start:source_nt_end]
# Don't allow source_h to occur multiple times in rule.source.
# Otherwise this leads to an ambiguous selection between the occurrences,
# so take the more conservative approach and disallow this.
if rule_utils.rhs_count(rule.source, source_h) > 1:
continue
# Don't allow source_h to only contain a single non-terminal.
if source_h == tuple([qcfg_rule.NT_1]) or source_h == tuple(
[qcfg_rule.NT_2]):
continue
source_g = (
rule.source[:source_nt_start] + tuple([NT_TMP]) +
rule.source[source_nt_end:])
# Don't allow source_g to only contain a single non-terminal.
if source_g == tuple([NT_TMP]):
continue
# Don't allow source_g to contain >2 non-terminals.
if qcfg_rule.NT_1 in source_g and qcfg_rule.NT_2 in source_g:
continue
for target_nt_start in range(len(rule.target)):
for target_nt_end in range(target_nt_start + 1, len(rule.target) + 1):
target_h = rule.target[target_nt_start:target_nt_end]
# Optionally allow target_h to occur multiple times in rule.target.
if rule_utils.rhs_count(rule.target, target_h) > 1:
if allow_repeated_target_nts:
target_g = rule_utils.rhs_replace(rule.target, target_h, NT_TMP)
else:
continue
else:
target_g = (
rule.target[:target_nt_start] + tuple([NT_TMP]) +
rule.target[target_nt_end:])
# Don't allow target_g to contain >2 non-terminals.
if qcfg_rule.NT_1 in target_g and qcfg_rule.NT_2 in target_g:
continue
candidate_pair = _maybe_get_candidate_pair(source_g, source_h,
target_g, target_h)
if candidate_pair:
candidate_pairs.append(candidate_pair)
return candidate_pairs
def find_possible_splits(rule, derivable_rules, allow_repeated_target_nts=True):
"""Implements `NEW` procedure described in paper appendix."""
candidates = _get_split_candidates(rule, allow_repeated_target_nts)
# Set of QCFGRules.
rule_candidates = set()
for rule_b, rule_c in candidates:
if rule_b in derivable_rules and rule_c not in derivable_rules:
# <NT, NT> =>_a == <NT, NT> =>_b =>_c where b is in derivable_rules.
rule_candidates.add(rule_c)
elif rule_c in derivable_rules and rule_b not in derivable_rules:
# <NT, NT> =>_a == <NT, NT> =>_b =>_c where c is in derivable_rules.
rule_candidates.add(rule_b)
return rule_candidates
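# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Splits a seed rule given that "foo ### foo" is already derivable.
if __name__ == "__main__":
  _rule = qcfg_rule.rule_from_string("foo bar ### bar foo")
  _derivable = {qcfg_rule.rule_from_string("foo ### foo")}
  for _candidate in find_possible_splits(_rule, _derivable):
    print(_candidate)  # e.g. "NT_1 bar ### bar NT_1"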
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/split_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various common functions related to QCFGRules."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.qcfg import qcfg_rule
def _swap_nt_order(rhs):
new_rhs = []
for symbol in rhs:
if symbol == qcfg_rule.NT_1:
new_rhs.append(qcfg_rule.NT_2)
elif symbol == qcfg_rule.NT_2:
new_rhs.append(qcfg_rule.NT_1)
else:
new_rhs.append(symbol)
return tuple(new_rhs)
def canonicalize_nts(source, target, arity):
"""Follows convention of source indexes being in order."""
if arity == 1:
if qcfg_rule.NT_2 in source:
source = rhs_replace(source, [qcfg_rule.NT_2], qcfg_rule.NT_1)
target = rhs_replace(target, [qcfg_rule.NT_2], qcfg_rule.NT_1)
elif arity == 2:
if qcfg_rule.NT_1 not in source or qcfg_rule.NT_2 not in source:
raise ValueError("Bad arity 2 source: %s" % (source,))
if source.index(qcfg_rule.NT_1) > source.index(qcfg_rule.NT_2):
source = _swap_nt_order(source)
target = _swap_nt_order(target)
return source, target
def rhs_count(list_to_search, sublist):
"""Returns count of occurances of sublist in list_to_search."""
if len(sublist) > len(list_to_search):
return 0
count = 0
for idx in range(len(list_to_search) - len(sublist) + 1):
if list_to_search[idx:idx + len(sublist)] == sublist:
count += 1
return count
def rhs_contains(list_to_search, sublist):
"""Returns True if sublist is contained in list_to_search."""
if len(sublist) > len(list_to_search):
return False
for idx in range(len(list_to_search) - len(sublist) + 1):
if list_to_search[idx:idx + len(sublist)] == sublist:
return True
return False
def rhs_can_maybe_derive(rhs_a, rhs_b):
"""Return False if rhs_a cannot be used in a derivation of rhs_b.
This function uses a fast approximation based on terminal sequence overlap
to identify cases where `rhs_a` could never be used in a derivation of
`rhs_b`.
For example, given `rhs_a`:
"foo NT foo NT"
There is a derivation that includes `rhs_a` that derives:
"foo bar bar foo NT"
But there is no derivation that includes `rhs_a` and derives:
"foo NT dax NT"
Args:
rhs_a: Tuple of strings for source or target of QCFGRule.
rhs_b: Same type as rhs_a.
Returns:
False if rhs_a cannot be used in a derivation of rhs_b.
"""
len_rhs_a = len(rhs_a)
len_rhs_b = len(rhs_b)
if len_rhs_a > len_rhs_b:
return False
if not rhs_a or not rhs_b:
return False
# Represent search state with backtracking.
rhs_a_idx_backtrack = 0
rhs_a_idx = 0
rhs_b_idx_backtrack = 0
rhs_b_idx = 0
while True:
if rhs_a_idx >= len_rhs_a:
# Completed matching all terminals.
return True
if rhs_b_idx >= len_rhs_b:
# Failed to match all terminal sequences.
return False
# Fail early if match cannot be made based on remaining length.
if (len_rhs_a - rhs_a_idx) > (len_rhs_b - rhs_b_idx):
return False
a_symbol = rhs_a[rhs_a_idx]
b_symbol = rhs_b[rhs_b_idx]
if a_symbol == b_symbol:
# Matched next terminal symbol, increment indexes.
rhs_a_idx += 1
rhs_b_idx += 1
elif a_symbol == qcfg_rule.NT_2 or a_symbol == qcfg_rule.NT_1:
# Completed matching terminal sequence.
# Increment backtrack indexes past this sequence.
rhs_a_idx += 1
rhs_a_idx_backtrack = rhs_a_idx
rhs_b_idx_backtrack = rhs_b_idx
else:
# Symbols do not match, backtrack.
rhs_a_idx = rhs_a_idx_backtrack
rhs_b_idx_backtrack += 1
rhs_b_idx = rhs_b_idx_backtrack
def rhs_replace(rhs, sublist, replacement):
"""Replace occurrences of sublist in rhs with replacement."""
sublist = tuple(sublist)
rhs = tuple(rhs)
if len(sublist) > len(rhs):
raise ValueError
if not sublist:
raise ValueError
new_list = []
idx = 0
while idx < len(rhs):
if rhs[idx:idx + len(sublist)] == sublist:
new_list.append(replacement)
idx += len(sublist)
else:
new_list.append(rhs[idx])
idx += 1
return tuple(new_list)
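# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Demonstrates substring replacement and the fast derivability check.
if __name__ == "__main__":
  assert rhs_replace(("foo", "bar", "baz"), ["bar"], qcfg_rule.NT_1) == (
      "foo", "NT_1", "baz")
  assert rhs_can_maybe_derive(("foo", qcfg_rule.NT_1), ("foo", "bar", "bar"))
  assert not rhs_can_maybe_derive(("foo", qcfg_rule.NT_1), ("bar", "bar"))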
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/rule_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for computing codelengths over QCFG rules."""
import collections
import math
import random
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.induction import rule_utils
from model.qcfg import qcfg_parser
from model.qcfg import qcfg_rule
def rule_codelength(rule, config):
"""Computes the codelength for a given rule."""
length = 0.0
for token in rule.source + rule.target:
if token in {qcfg_rule.NT_1, qcfg_rule.NT_2}:
length += config.non_terminal_codelength
else:
length += config.terminal_codelength
return length
def _aggregate_counts(child_counts):
"""Return aggregated node count as int."""
if not child_counts:
return 1
elif len(child_counts) == 1:
return child_counts[0]
elif len(child_counts) == 2:
return child_counts[0] * child_counts[1]
else:
raise ValueError
def _get_num_all_derivations(source, rules, verbose):
"""Return total number of derivations for any target."""
def node_fn(unused_span_begin, unused_span_end, unused_rule, children):
"""Represent nodes as integer counts of possible derivations."""
return _aggregate_counts(children)
def postprocess_fn(nodes):
"""Merge and sum all nodes."""
return [sum(nodes)]
outputs = qcfg_parser.parse(
source,
rules,
node_fn=node_fn,
postprocess_cell_fn=postprocess_fn,
verbose=verbose)
if len(outputs) != 1:
raise ValueError
num_outputs = outputs[0]
return num_outputs
def _get_num_target_derivations(source, target, rules, verbose):
"""Return number of derivations of target."""
goal_target_string = " ".join(target)
def node_fn(unused_span_begin, unused_span_end, rule, children):
"""Represent nodes as (target string, int count of possible derivations)."""
target_strings = [target_string for target_string, _ in children]
new_target_string = qcfg_rule.apply_target(rule, target_strings)
child_counts = [child_count for _, child_count in children]
count = _aggregate_counts(child_counts)
return (new_target_string, count)
def postprocess_fn(nodes):
"""Discard nodes that cannot reach goal and aggregate counts."""
counts_dict = collections.defaultdict(int)
for target_string, count in nodes:
# Discard any targets that are not substrings of goal target.
if target_string not in goal_target_string:
continue
counts_dict[target_string] += count
return [
(target_string, count) for target_string, count in counts_dict.items()
]
outputs = qcfg_parser.parse(
source,
rules,
node_fn=node_fn,
postprocess_cell_fn=postprocess_fn,
verbose=verbose)
for target_string, count in outputs:
if target_string == goal_target_string:
return count
raise ValueError("No target derivation for example (%s, %s)" %
(source, target))
def _target_codelength(source, target, rules, verbose=False):
"""Return codelength for encoding `target` given `source` and `rules`.
The codelength of the target is computed as -log_2(P(y|x)).
For P(y|x) we use a naive uniform distribution over derivations, such that:
P(y|x) = # of derivations of <x,y> / # of derivations of <x,?>,
where ? is any target strings.
We therefore run a QCFG parser twice to determine the numerator and
denominator counts.
Args:
source: Tuple of source tokens.
target: Tuple of target tokens.
rules: Set of QCFGRule instances.
verbose: Print debug logging if True.
Returns:
Float representing codelength for encoding `target` given `source` and
`rules`.
"""
num_derivations = _get_num_all_derivations(source, rules, verbose=verbose)
num_target_derivations = _get_num_target_derivations(
source, target, rules, verbose=verbose)
# Note log(B/A) = -log(A/B).
codelength = math.log2(float(num_derivations) / float(num_target_derivations))
if verbose:
print("(%s, %s): %s derivations, %s target derivations, %s codelength" %
(source, target, num_derivations, num_target_derivations, codelength))
return codelength
def _find_relevant_examples(dataset, rule):
"""Find examples in `dataset` where `rule` could be used in a derivation."""
# TODO(petershaw): This could potentially be more efficient by pre-indexing
# the dataset sources in a data structure such as a Trie.
examples = []
for source_str, target_str in dataset:
source = source_str.split()
target = target_str.split()
if rule_utils.rhs_can_maybe_derive(rule.source, source):
examples.append((source, target))
return examples
def get_dataset_encoding_delta(sample_size,
examples,
current_rules,
candidate_rule_to_add,
candidate_rules_to_remove,
verbose=False):
"""Approximate increase in codelength to encode dataset."""
# Make a copy of the ruleset and add/remove candidates.
new_rules = current_rules.copy()
for rule_to_remove in candidate_rules_to_remove:
new_rules.remove(rule_to_remove)
new_rules.add(candidate_rule_to_add)
relevant_examples = _find_relevant_examples(examples, candidate_rule_to_add)
num_relevant_examples = len(relevant_examples)
sample = False
if verbose:
print("%s relevant rules." % num_relevant_examples)
# If configured, sample rules for effeciency.
if sample_size and num_relevant_examples > sample_size:
random.shuffle(relevant_examples)
relevant_examples = relevant_examples[:sample_size]
sample = True
# Compute the increase in target codelength summed across the sample.
delta = 0
for source, target in relevant_examples:
new_codelength = _target_codelength(
source, target, new_rules, verbose=verbose)
original_codelength = _target_codelength(
source, target, current_rules, verbose=verbose)
delta += (new_codelength - original_codelength)
# Estimate delta across entire set based on our sample.
if sample:
scale_factor = float(num_relevant_examples) / float(sample_size)
delta *= scale_factor
if verbose:
print("Scaling delta by %s." % scale_factor)
if verbose:
print("Delta: %s." % delta)
return delta
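# --- Illustrative sketch (added for clarity; not part of the original module).
# With this toy grammar there is exactly one derivation of "dax twice", and it
# yields the observed target, so the target codelength is -log2(1/1) = 0 bits.
if __name__ == "__main__":
  _rules = {
      qcfg_rule.rule_from_string("dax ### DAX"),
      qcfg_rule.rule_from_string("NT_1 twice ### NT_1 NT_1"),
  }
  print(_target_codelength(["dax", "twice"], ["DAX", "DAX"], _rules))  # 0.0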
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/codelength_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for identifying identical substrings in sources and targets."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.qcfg import qcfg_rule
def _in_matched_range(start_idx, end_idx, matched_ranges):
"""Return True if provided indices overlap any spans in matched_ranges."""
for range_start_idx, range_end_idx in matched_ranges:
if not (end_idx <= range_start_idx or start_idx >= range_end_idx):
return True
return False
def _find_exact_matches(source, target):
"""Returns longest non-overlapping sub-strings shared by source and target."""
source_len = len(source)
target_len = len(target)
matches = set()
matched_source_ranges = set()
matched_target_ranges = set()
for sequence_len in range(max(target_len, source_len), 0, -1):
for source_start_idx in range(0, source_len - sequence_len + 1):
source_end_idx = source_start_idx + sequence_len
if _in_matched_range(source_start_idx, source_end_idx,
matched_source_ranges):
continue
for target_start_idx in range(0, target_len - sequence_len + 1):
target_end_idx = target_start_idx + sequence_len
if _in_matched_range(target_start_idx, target_end_idx,
matched_target_ranges):
continue
source_span = source[source_start_idx:source_end_idx]
target_span = target[target_start_idx:target_end_idx]
if source_span == target_span:
matches.add(tuple(source_span))
matched_source_ranges.add((source_start_idx, source_end_idx))
matched_target_ranges.add((target_start_idx, target_end_idx))
return matches
def get_exact_match_rules(dataset):
"""Return set of rules for terminal sequences in both source and target."""
matches = set()
for source_str, target_str in dataset:
source = source_str.split()
target = target_str.split()
matches.update(_find_exact_matches(source, target))
exact_match_rules = set()
for match in matches:
rule = qcfg_rule.QCFGRule(source=tuple(match), target=tuple(match), arity=0)
exact_match_rules.add(rule)
return exact_match_rules
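# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Extracts terminal sequences shared by a toy source/target pair.
if __name__ == "__main__":
  _rules = get_exact_match_rules([("river in m0", "answer ( river ( m0 ) )")])
  for _rule in _rules:
    print(_rule)  # "river ### river" and "m0 ### m0"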
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/exact_match_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Induce and write QCFG rules."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.induction import induction_utils
from model.qcfg import qcfg_file
from tasks import tsv_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file of examples.")
flags.DEFINE_string("output", "", "Output rule txt file.")
flags.DEFINE_integer("sample_size", 500,
"Number of examples to sample for induction.")
flags.DEFINE_integer("max_iterations", 10000,
"Maximum number of grammar induction iterations.")
flags.DEFINE_integer("min_delta", 0,
"Minimum codelength delta to add a new rule.")
flags.DEFINE_integer("terminal_codelength", 32,
"Codelength coeffecient for terminals.")
flags.DEFINE_integer("non_terminal_codelength", 1,
"Codelength coeffecient for non-terminals.")
flags.DEFINE_integer(
"parse_sample", 10,
"Number of examples to sample for estimating target encoding codelength.")
flags.DEFINE_bool(
"allow_repeated_target_nts", True,
"Whether to allow multiple non-terminals with same index in targets.")
flags.DEFINE_bool("seed_exact_match", True,
"Whether to seed induction with exact match rules.")
flags.DEFINE_bool("balance_parens", True,
"Whether to require rules to have balanced parentheses.")
def induce_and_write_rules():
"""Induce and write set of rules."""
examples = tsv_utils.read_tsv(FLAGS.input)
config = induction_utils.InductionConfig(
sample_size=FLAGS.sample_size,
max_iterations=FLAGS.max_iterations,
min_delta=FLAGS.min_delta,
terminal_codelength=FLAGS.terminal_codelength,
non_terminal_codelength=FLAGS.non_terminal_codelength,
parse_sample=FLAGS.parse_sample,
allow_repeated_target_nts=FLAGS.allow_repeated_target_nts,
seed_exact_match=FLAGS.seed_exact_match,
balance_parens=FLAGS.balance_parens,
)
print("In induce rules main")
induced_rules = induction_utils.induce_rules(examples, config)
qcfg_file.write_rules(induced_rules, FLAGS.output)
def main(unused_argv):
induce_and_write_rules()
if __name__ == "__main__":
app.run(main)
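# Example invocation (appended sketch; paths are hypothetical, flag values
# mirror the defaults defined above):
#   python induce_rules.py \
#     --input=${BASE_DIR}/baseline_replication/TMCD/data/<dataset>/<split>_split/train.tsv \
#     --output=${BASE_DIR}/rules/<dataset>_rules.txt \
#     --sample_size=500 --max_iterations=10000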
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/induce_rules.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for applying rules to produce derivations.
Note that in this module we will reuse the QCFGRule tuple to represent both
derived string pairs and QCFG rules. Since we only allow a single LHS
non-terminal, both concepts can be represented as a pair of source and target
sequences. Therefore, we abuse terminology and refer to each concept
interchangeably in certain contexts.
"""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.induction import rule_utils
from model.qcfg import qcfg_rule
def _substitute(rhs_a, rhs_b, nt=qcfg_rule.NT_1):
"""Replace nt in rhs_a with rhs_b, re-indexing non-terminals if needed."""
output = []
for token in rhs_a:
if token == nt and nt == qcfg_rule.NT_2:
# Our goal is to replace NT_2 in rhs_a with rhs_b, but we need to
# do some re-indexing to avoid collisions.
# First, we re-index NT_1 in rhs_b to NT_2.
# Based on the logic in `apply`, if rhs_a has arity 2, then rhs_b
# will have arity < 2, i.e. will not contain NT_2.
rhs_b = rule_utils.rhs_replace(rhs_b, [qcfg_rule.NT_1], qcfg_rule.NT_2)
# We can now safely replace NT_2 in rhs_a with rhs_b, which should
# contain only NT_2.
output.extend(rhs_b)
elif token == nt:
# Replace NT_1 in rhs_a with rhs_b.
# Based on the logic in `apply`, no collisions on non-terminal indexes
# should occur, since we should either be in the case:
# 1. rhs_a only has NT_1, and rhs_b has NT_1 and NT_2
# 2. rhs_a has NT_1 and NT_2, but rhs_b only has NT_1
output.extend(rhs_b)
else:
output.append(token)
return output
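# Illustrative note (appended sketch; the token strings below are
# hypothetical): substituting an arity-1 RHS into the NT_2 slot of an
# arity-2 RHS re-indexes the inserted non-terminal as described above, e.g.
#   _substitute([qcfg_rule.NT_1, "and", qcfg_rule.NT_2],
#               ["walk", qcfg_rule.NT_1],
#               nt=qcfg_rule.NT_2)
#   -> [qcfg_rule.NT_1, "and", "walk", qcfg_rule.NT_2]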
def _apply(rule_a, rule_b):
"""Applies rule_b to rule_a, returning set of derived rules."""
outputs = []
if rule_a.arity == 2:
new_arity = 1 + rule_b.arity
if new_arity <= 2:
# Cannot apply an arity 2 rule to an arity 2 rule because this would lead
# to a rule with 3 different non-terminal indexes, which is disallowed
# by our QCFG conventions.
source_0 = _substitute(rule_a.source, rule_b.source)
target_0 = _substitute(rule_a.target, rule_b.target)
outputs.append((source_0, target_0, new_arity))
# Rule can potentially be applied to either non-terminal in rule_a.
source_1 = _substitute(rule_a.source, rule_b.source, nt=qcfg_rule.NT_2)
target_1 = _substitute(rule_a.target, rule_b.target, nt=qcfg_rule.NT_2)
outputs.append((source_1, target_1, new_arity))
elif rule_a.arity == 1:
new_arity = rule_b.arity
source = _substitute(rule_a.source, rule_b.source)
target = _substitute(rule_a.target, rule_b.target)
outputs.append((source, target, new_arity))
output_rules = set()
for source, target, arity in outputs:
source, target = rule_utils.canonicalize_nts(source, target, arity)
output_rules.add(qcfg_rule.QCFGRule(tuple(source), tuple(target), arity))
return output_rules
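# Illustrative note (appended sketch; the rules below are hypothetical):
# applying an arity-0 rule to an arity-1 rule substitutes its RHS into the
# single non-terminal slot, e.g.
#   rule_a = qcfg_rule.QCFGRule((qcfg_rule.NT_1, "twice"),
#                               (qcfg_rule.NT_1, qcfg_rule.NT_1), arity=1)
#   rule_b = qcfg_rule.QCFGRule(("walk",), ("WALK",), arity=0)
#   _apply(rule_a, rule_b)
#   -> {QCFGRule(source=("walk", "twice"), target=("WALK", "WALK"), arity=0)}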
def _can_maybe_derive_from(rule, goal_rule):
"""Return True if rule can potentially be used to derive goal_rule."""
# Don't allow 'reflexive' derivations.
if rule == goal_rule:
return False
if not rule_utils.rhs_can_maybe_derive(rule.source, goal_rule.source):
return False
if not rule_utils.rhs_can_maybe_derive(rule.target, goal_rule.target):
return False
return True
def _filter_rules(rules, goal_rule):
return [rule for rule in rules if _can_maybe_derive_from(rule, goal_rule)]
def _verify_arity(rule):
"""Raise ValueError if rule does not follow valid arity convention."""
if rule.arity == 0:
if qcfg_rule.NT_1 in rule.source:
raise ValueError("Invalid rule: %s" % (rule,))
if qcfg_rule.NT_2 in rule.source:
raise ValueError("Invalid rule: %s" % (rule,))
elif rule.arity == 1:
if qcfg_rule.NT_1 not in rule.source:
raise ValueError("Invalid rule: %s" % (rule,))
if qcfg_rule.NT_2 in rule.source:
raise ValueError("Invalid rule: %s" % (rule,))
elif rule.arity == 2:
if qcfg_rule.NT_1 not in rule.source:
raise ValueError("Invalid rule: %s" % (rule,))
if qcfg_rule.NT_2 not in rule.source:
raise ValueError("Invalid rule: %s" % (rule,))
return True
def can_derive(goal_rule,
rules,
derived_rules=None,
max_iter=15,
verbose=False):
"""Return True if `goal_rule` can be derived given `rules`.
We perform a relatively naive breadth first search (BFS), with early pruning
in cases where it can be quickly determined that an intermediate result
cannot be used in a derivation of our goal.
Args:
goal_rule: A QCFGRule representing a string pair to derive.
rules: A set of QCFGRules.
derived_rules: If not None, will add any derived QCFGRules that can
potentially derive `goal_rule` given some substitution to this set.
max_iter: Maximum number of iterations (i.e. derivation depth) for
attempting to derive `goal_rule`.
verbose: Print debugging logging if True.
Returns:
True if `goal_rule` can be derived given `rules`.
"""
# Filter rules to the set that can potentially be used in a derivation
# of `goal_rule`.
filtered_rules = _filter_rules(rules, goal_rule)
if verbose:
print("filtered_rules: %s" % filtered_rules)
# Track seen rules.
seen_rules = set(filtered_rules)
# Set of derived rules with derivation depth equal to iteration.
search_state = set(filtered_rules)
for _ in range(max_iter):
if not search_state:
if verbose:
print("Cannot derive %s." % str(goal_rule))
return False
if verbose:
print("Starting next iteration with search_state:")
for rule in search_state:
print(rule)
new_search_state = set()
for rule_a in search_state:
# Attempt to apply every relevant rule to every rule in search_state.
for rule_b in filtered_rules:
new_rules = _apply(rule_a, rule_b)
if verbose:
print("Applying %s to %s yields %s" % (rule_b, rule_a, new_rules))
for new_rule in new_rules:
# Check that application has not led to a malformed rule.
_verify_arity(new_rule)
if new_rule in seen_rules:
continue
seen_rules.add(new_rule)
if goal_rule == new_rule:
if verbose:
print("Derived %s." % str(goal_rule))
return True
# If the generated rule can be potentially used in a derivation of
# our goal, then add to the search state for the next iteration.
if _can_maybe_derive_from(new_rule, goal_rule):
if derived_rules is not None:
derived_rules.add(new_rule)
new_search_state.add(new_rule)
else:
if verbose:
print("Cannot derive goal from: %s" % str(new_rule))
search_state = new_search_state
# For the datasets we have studied, this limit should not generally apply.
print("Reached max iterations for rule `%s` given rules `%s`" %
(goal_rule, filtered_rules))
return False
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/induction/derivation_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for permute MNIST experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import construct_permute_mnist
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, update_reservior
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 10 # Number of experiments to average over
TRAIN_ITERS = 5000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 1e-3
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_POWER = 0.9
OPT_MOMENTUM = 0.9
VALID_ARCHS = ['FC-S', 'FC-B']
ARCH = 'FC-S'
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'A-GEM', 'S-GEM', 'FTR_EXT', 'PNN', 'ER'] #List of valid models
IMP_METHOD = 'EWC'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 10 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 25 # Number of samples per class
INPUT_FEATURE_SIZE = 784
IMG_HEIGHT = 28
IMG_WIDTH = 28
IMG_CHANNELS = 1
TOTAL_CLASSES = 10 # Total number of classes in the dataset
EPS_MEM_BATCH_SIZE = 256
DEBUG_EPISODIC_MEMORY = False
USE_GPU = True
K_FOR_CROSS_VAL = 3
TIME_MY_METHOD = False
COUNT_VIOLATIONS = False
MEASURE_PERF_ON_EPS_MEMORY = False
## Logging, saving and testing options
LOG_DIR = './permute_mnist_results'
## Evaluation options
## Num Tasks
NUM_TASKS = 20
MULTI_TASK = False
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
The parsed arguments as an argparse.Namespace.
"""
parser = argparse.ArgumentParser(description="Script for permuted mnist experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH, help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Number of samples per class from previous tasks.")
parser.add_argument("--eps-mem-batch", type=int, default=EPS_MEM_BATCH_SIZE,
help="Number of samples per class from previous tasks.")
parser.add_argument("--examples-per-task", type=int, default=1000,
help="Number of examples per task.")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
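# Example invocation (appended sketch; flag values are hypothetical, see the
# defaults defined above):
#   python fc_permute_mnist.py --imp-method A-GEM --arch FC-S \
#       --train-single-epoch --mem-size 25 --eps-mem-batch 256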
def train_task_sequence(model, sess, args):
"""
Train and evaluate LLL system such that we only see an example once
Args:
Returns:
dict A dictionary containing mean and stds for the experiment
"""
# List to store accuracy for each run
runs = []
batch_size = args.batch_size
if model.imp_method == 'A-GEM' or model.imp_method == 'ER':
use_episodic_memory = True
else:
use_episodic_memory = False
# Loop over number of runs to average over
for runid in range(args.num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(args.random_seed+runid)
# Load the permute mnist dataset
datasets = construct_permute_mnist(model.num_tasks)
episodic_mem_size = args.mem_size*model.num_tasks*TOTAL_CLASSES
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = np.arange(TOTAL_CLASSES)
if use_episodic_memory:
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, INPUT_FEATURE_SIZE])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
count_cls = np.zeros(TOTAL_CLASSES, dtype=np.int32)
episodic_filled_counter = 0
examples_seen_so_far = 0
# Mask for softmax
# Since all the classes are present in all the tasks so nothing to mask
logit_mask = np.ones(TOTAL_CLASSES)
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
pnn_logit_mask = np.ones([model.num_tasks, TOTAL_CLASSES])
if COUNT_VIOLATIONS:
violation_count = np.zeros(model.num_tasks)
vc = 0
# Training loop for all the tasks
for task in range(len(datasets)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0 and model.imp_method != 'PNN'):
model.restore(sess)
# Extract training images and labels for the current task
task_train_images = datasets[task]['train']['images']
task_train_labels = datasets[task]['train']['labels']
# If multi_task is set the train using datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(datasets)):
task_train_images = np.concatenate((task_train_images, datasets[t_]['train']['images']), axis=0)
task_train_labels = np.concatenate((task_train_labels, datasets[t_]['train']['labels']), axis=0)
else:
# Skip training for this task
continue
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
total_train_examples = task_train_images.shape[0]
# Randomly shuffle the training examples
perm = np.arange(total_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm][:args.examples_per_task]
train_y = task_train_labels[perm][:args.examples_per_task]
task_sample_weights = task_sample_weights[perm][:args.examples_per_task]
print('Received {} images, {} labels at task {}'.format(train_x.shape[0], train_y.shape[0], task))
# Array to store accuracies when training for task T
ftask = []
num_train_examples = train_x.shape[0]
# Train a task observing sequence of data
if args.train_single_epoch:
num_iters = num_train_examples // batch_size
else:
num_iters = args.train_iters
# Training loop for task T
for iters in range(num_iters):
if args.train_single_epoch and not args.cross_validate_mode:
if (iters < 10) or (iters < 100 and iters % 10 == 0) or (iters % 100 == 0):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets, args.online_cross_val)
ftask.append(fbatch)
offset = (iters * batch_size) % (num_train_examples - batch_size)
residual = batch_size
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_[task]: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: True}
if model.imp_method == 'VAN':
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PNN':
feed_dict[model.task_id] = task
_, loss = sess.run([model.train[task], model.unweighted_entropy[task]], feed_dict=feed_dict)
elif model.imp_method == 'FTR_EXT':
if task == 0:
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_classifier, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC':
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
if episodic_filled_counter <= args.eps_mem_batch:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, args.eps_mem_batch, replace=False) # Sample without replacement so that we don't sample an example more than once
# Store the reference gradient
sess.run(model.store_ref_grads, feed_dict={model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.keep_prob: 1.0, model.output_mask: logit_mask, model.train_phase: True})
if COUNT_VIOLATIONS:
vc, _, loss = sess.run([model.violation_count, model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
else:
# Compute the gradient for current task and project if need be
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
# Put the batch in the ring buffer
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
cls = np.unique(np.nonzero(er_y_))[-1]
# Write the example at the location pointed by count_cls[cls]
cls_to_index_map = cls
with_in_task_offset = args.mem_size * cls_to_index_map
mem_index = count_cls[cls] + with_in_task_offset + episodic_filled_counter
episodic_images[mem_index] = er_x
episodic_labels[mem_index] = er_y_
count_cls[cls] = (count_cls[cls] + 1) % args.mem_size
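# Note on the ring-buffer arithmetic above (appended sketch, hypothetical
# numbers): with mem_size=25 samples per class, 10 classes, class cls=3,
# count_cls[3]=7 and two tasks already stored
# (episodic_filled_counter = 2 * 25 * 10 = 500), the write location is
#   mem_index = 7 + 25 * 3 + 500 = 582,
# and count_cls[3] then wraps modulo mem_size so the class slot is reused.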
elif model.imp_method == 'RWALK':
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'ER':
mem_filled_so_far = examples_seen_so_far if (examples_seen_so_far < episodic_mem_size) else episodic_mem_size
if mem_filled_so_far < args.eps_mem_batch:
er_mem_indices = np.arange(mem_filled_so_far)
else:
er_mem_indices = np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
# Train on a batch of episodic memory first
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0)
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: True}
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
update_reservior(er_x, er_y_, episodic_images, episodic_labels, episodic_mem_size, examples_seen_so_far)
examples_seen_so_far += 1
if (iters % 100 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
sys.exit(0)
print('\t\t\t\tTraining for Task%d done!'%(task))
# Update the episodic memory filled counter
if use_episodic_memory:
episodic_filled_counter += args.mem_size * TOTAL_CLASSES
if model.imp_method == 'A-GEM' and COUNT_VIOLATIONS:
violation_count[task] = vc
print('Task {}: Violation Count: {}'.format(task, violation_count))
sess.run(model.reset_violation_count, feed_dict=feed_dict)
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if (task < (len(datasets) - 1)) or MEASURE_PERF_ON_EPS_MEMORY:
model.task_updates(sess, task, task_train_images, np.arange(TOTAL_CLASSES))
print('\t\t\t\tTask updates after Task%d done!'%(task))
if args.train_single_epoch and not args.cross_validate_mode:
fbatch = test_task_sequence(model, sess, datasets, False)
ftask.append(fbatch)
ftask = np.array(ftask)
else:
if MEASURE_PERF_ON_EPS_MEMORY:
eps_mem = {
'images': episodic_images,
'labels': episodic_labels,
}
# Measure perf on episodic memory
ftask = test_task_sequence(model, sess, eps_mem, args.online_cross_val)
else:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets, args.online_cross_val)
# Store the accuracies computed at task T in a list
evals.append(ftask)
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
runs.append(np.array(evals))
# End for loop runid
runs = np.array(runs)
return runs
def test_task_sequence(model, sess, test_data, cross_validate_mode):
"""
Snapshot the current performance
"""
if TIME_MY_METHOD:
# Only compute the training time
return np.zeros(model.num_tasks)
list_acc = []
if model.imp_method == 'PNN':
pnn_logit_mask = np.ones([model.num_tasks, TOTAL_CLASSES])
else:
logit_mask = np.ones(TOTAL_CLASSES)
if MEASURE_PERF_ON_EPS_MEMORY:
for task in range(model.num_tasks):
mem_offset = task*SAMPLES_PER_CLASS*TOTAL_CLASSES
feed_dict = {model.x: test_data['images'][mem_offset:mem_offset+SAMPLES_PER_CLASS*TOTAL_CLASSES],
model.y_: test_data['labels'][mem_offset:mem_offset+SAMPLES_PER_CLASS*TOTAL_CLASSES], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False}
acc = model.accuracy.eval(feed_dict = feed_dict)
list_acc.append(acc)
print(list_acc)
return list_acc
for task, _ in enumerate(test_data):
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
feed_dict = {model.x: test_data[task]['test']['images'],
model.y_[task]: test_data[task]['test']['labels'], model.keep_prob: 1.0}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
acc = model.accuracy[task].eval(feed_dict = feed_dict)
else:
feed_dict = {model.x: test_data[task]['test']['images'],
model.y_: test_data[task]['test']['labels'], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False}
acc = model.accuracy.eval(feed_dict = feed_dict)
list_acc.append(acc)
return list_acc
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'DATASET': 'PERMUTE_MNIST',
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': args.imp_method,
'SYNAP_STGTH': args.synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': args.learning_rate,
'BATCH_SIZE': args.batch_size,
'MEM_SIZE': args.mem_size}
experiment_id = "PERMUTE_MNIST_HERDING_%s_%s_%s_%s_%r_%s-"%(args.arch, args.train_single_epoch, args.imp_method, str(args.synap_stgth).replace('.', '_'),
str(args.batch_size), str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Get the subset of data depending on training or cross-validation mode
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS - K_FOR_CROSS_VAL
# Variables to store the accuracies and standard deviations of the experiment
acc_mean = dict()
acc_std = dict()
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(args.random_seed)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, INPUT_FEATURE_SIZE])
#x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
if args.imp_method == 'PNN':
y_ = []
for i in range(num_tasks):
y_.append(tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES]))
else:
y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
elif args.optim == 'MOMENTUM':
base_lr = tf.constant(args.learning_rate)
learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - train_step / training_iters), OPT_POWER))
opt = tf.train.MomentumOptimizer(args.learning_rate, OPT_MOMENTUM)
# Create the model/ construct the graph
model = Model(x, y_, num_tasks, opt, args.imp_method, args.synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch)
# Set up tf session and initialize variables.
if USE_GPU:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
else:
config = tf.ConfigProto(
device_count = {'GPU': 0}
)
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
runs = train_task_sequence(model, sess, args)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
# If cross-validation flag is enabled, store the stuff in a text file
if args.cross_validate_mode:
acc_mean = runs.mean(0)
acc_std = runs.std(0)
cross_validate_dump_file = args.log_dir + '/' + 'PERMUTE_MNIST_%s_%s'%(args.imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
if MULTI_TASK:
f.write('GPU:{} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(USE_GPU, args.arch, args.learning_rate,
args.synap_stgth, acc_mean[-1, :].mean()))
else:
f.write('GPU: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {} \t Fgt: {} \t Time: {}\n'.format(USE_GPU, args.arch, args.learning_rate,
args.synap_stgth, acc_mean[-1, :].mean(), compute_fgt(acc_mean), str(time_spent)))
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
if __name__ == '__main__':
main()
|
agem-main
|
fc_permute_mnist.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split AWA experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import random
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import image_scaling, random_crop_and_pad_image, random_horizontal_flip, construct_split_awa
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data, load_task_specific_data_in_proportion
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'VGG', 'RESNET-B']
ARCH = 'RESNET-B'
PRETRAIN = False
## Model options
#MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'M-EWC', 'GEM', 'A-GEM', 'S-GEM'] #List of valid models
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'A-GEM'] #List of valid models
IMP_METHOD = 'VAN'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 20 # Number of samples per class
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
TOTAL_CLASSES = 50 # Total number of classes in the dataset
MEASURE_CONVERGENCE_AFTER = 0.9
EPS_MEM_BATCH_SIZE = 128
DEBUG_EPISODIC_MEMORY = False
KEEP_EPISODIC_MEMORY_FULL = False
K_FOR_CROSS_VAL = 3
CLASSES_PER_TASK = 5
## Logging, saving and testing options
LOG_DIR = './split_awa_results'
SNAPSHOT_DIR = './awa_snapshots/sgd'
SAVE_MODEL_PARAMS = False
RESNET18_IMAGENET_CHECKPOINT = './resnet-18-pretrained-imagenet/model.ckpt'
## Evaluation options
## Task split
NUM_TASKS = 20
MULTI_TASK = False
## Dataset specific options
DATA_DIR= './AWA_data/Animals_with_Attributes2/'
AWA_TRAIN_LIST = './dataset_lists/AWA_train_list.txt'
AWA_VAL_LIST = './dataset_lists/AWA_val_list.txt'
AWA_TEST_LIST = './dataset_lists/AWA_test_list.txt'
#AWA_TRAIN_LIST = './dataset_lists/tmp_list_awa.txt'
#AWA_VAL_LIST = './dataset_lists/tmp_list_awa.txt'
#AWA_TEST_LIST = './dataset_lists/tmp_list_awa.txt'
# Define function to load/ store training weights. We will use ImageNet initialization later on
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
The parsed arguments as an argparse.Namespace.
"""
parser = argparse.ArgumentParser(description="Script for split AWA experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--do-sampling", action="store_true",
help="Whether to do sampling")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Number of samples per class from previous tasks.")
parser.add_argument("--is-herding", action="store_true",
help="Herding based sampling")
parser.add_argument("--data-dir", type=str, default=DATA_DIR,
help="Directory from where the AWA data will be read.\
NOTE: Provide the path up to <AWA_DIR>/Animals_with_Attributes2")
parser.add_argument("--init-checkpoint", type=str, default=RESNET18_IMAGENET_CHECKPOINT,
help="Path to TF checkpoint file or npz file containing initialization for ImageNet.\
NOTE: NPZ file for VGG and TF checkpoint for ResNet")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
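# Example invocation (appended sketch; script name and flag values are
# hypothetical, default paths are taken from the constants defined above):
#   python <this_script>.py --imp-method A-GEM --arch RESNET-B \
#       --data-dir ./AWA_data/Animals_with_Attributes2/ \
#       --init-checkpoint ./resnet-18-pretrained-imagenet/model.ckpt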
def train_task_sequence(model, sess, saver, datasets, cross_validate_mode, train_single_epoch, do_sampling, is_herding,
episodic_mem_size, train_iters, batch_size, num_runs, init_checkpoint, online_cross_val, random_seed):
"""
Train and evaluate LLL system such that we only see an example once
Args:
Returns:
dict A dictionary containing mean and stds for the experiment
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
break_training = 0
# Loop over number of runs to average over
for runid in range(num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(random_seed+runid)
random.seed(random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
classes_per_task = CLASSES_PER_TASK
classes_appearing_in_tasks = dict()
for cls in range(TOTAL_CLASSES):
classes_appearing_in_tasks[cls] = 0
if online_cross_val:
label_array = np.arange(TOTAL_CLASSES)
for tt in range(model.num_tasks):
offset = tt * classes_per_task
task_labels.append(list(label_array[offset:offset+classes_per_task]))
else:
for tt in range(model.num_tasks):
task_labels.append(random.sample(range(K_FOR_CROSS_VAL*classes_per_task, TOTAL_CLASSES), classes_per_task))
for lab in task_labels[tt]:
classes_appearing_in_tasks[lab] += 1
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
print('Class frequency in Tasks: {}'.format(classes_appearing_in_tasks))
# Store the task labels
task_labels_dataset.append(task_labels)
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
if PRETRAIN:
# Load the variables from a checkpoint
if model.network_arch == 'RESNET-B':
# Define loader (weights which will be loaded from a checkpoint)
restore_vars = [v for v in model.trainable_vars if 'fc' not in v.name]
loader = tf.train.Saver(restore_vars)
load(loader, sess, init_checkpoint)
elif model.network_arch == 'VGG':
# Load the pretrained weights from the npz file
weights = np.load(init_checkpoint)
keys = sorted(weights.keys())
for i, key in enumerate(keys[:-2]): # Load everything except the last layer
sess.run(model.trainable_vars[i].assign(weights[key]))
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
if model.imp_method == 'S-GEM':
# List to store the episodic memories of the previous tasks
task_based_memory = []
if model.imp_method == 'A-GEM':
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, model.num_tasks*TOTAL_CLASSES])
episodic_filled_counter = 0
a_gem_logit_mask = np.zeros([model.num_tasks, model.total_classes])
if do_sampling:
# List to store important samples from the previous tasks
last_task_x = None
last_task_y_ = None
# Mask for softmax
logit_mask = np.zeros(model.total_classes)
max_batch_dimension = 500
# Dict to store the number of times a class has already been seen in the training
class_seen_already = dict()
for cls in range(TOTAL_CLASSES):
class_seen_already[cls] = 0
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0):
model.restore(sess)
# Increment the class seen count
for cls in task_labels[task]:
class_seen_already[cls] += 1
# Load the task specific dataset
task_train_images, task_train_labels = load_task_specific_data_in_proportion(datasets[0]['train'], task_labels[task], classes_appearing_in_tasks, class_seen_already)
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
print('Unique labels in the task: {}'.format(np.unique(np.nonzero(task_train_labels)[1])))
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
logit_mask[:] = 0
# Train a task observing sequence of data
if train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
else:
num_iters = train_iters
logit_mask_offset = task * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in task_labels[task]]
logit_mask[classes_adjusted_for_head] = 1.0
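# e.g. (appended note, hypothetical numbers) task 2 with labels
# [15, 18, 22, 31, 40] and TOTAL_CLASSES = 50 activates logits
# [115, 118, 122, 131, 140] in the task-specific head.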
# Randomly shuffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
if cross_validate_mode:
# Because we will evaluate at the end
ftask = 0
elif train_single_epoch:
# Because we will evaluate after every mini-batch of every task
ftask = np.zeros([max_batch_dimension+1, model.num_tasks])
batch_dim_count = 0
else:
# Because we will evaluate after every task
ftask = []
# Number of iterations after which convergence is checked
convergence_iters = int(num_iters * MEASURE_CONVERGENCE_AFTER)
final_train_labels = np.zeros([batch_size, model.total_classes])
head_offset = task * TOTAL_CLASSES
# Training loop for task T
for iters in range(num_iters):
if train_single_epoch and not cross_validate_mode:
if (iters < 11):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, online_cross_val)
ftask[batch_dim_count] = fbatch
# Increment the batch_dim_count
batch_dim_count += 1
# Set the output labels over which the model needs to be trained
if model.imp_method == 'A-GEM':
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][classes_adjusted_for_head] = 1.0
else:
logit_mask[:] = 0
logit_mask[classes_adjusted_for_head] = 1.0
if train_single_epoch:
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
final_train_labels[:residual, head_offset:head_offset+TOTAL_CLASSES] = train_y[offset:offset+residual]
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: final_train_labels[:residual],
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
else:
offset = (iters * batch_size) % (num_train_examples - batch_size)
final_train_labels[:, head_offset:head_offset+TOTAL_CLASSES] = train_y[offset:offset+batch_size]
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: final_train_labels,
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC' or model.imp_method == 'M-EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
if (iters >= convergence_iters) and (model.imp_method == 'M-EWC'):
_, _, _, _, loss = sess.run([model.weights_old_ops_grouped, model.set_tmp_fisher, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
else:
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'S-GEM':
if task == 0:
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
# Randomly sample a task from the previous tasks
prev_task = np.random.randint(0, task)
# Set the logit mask for the randomly sampled task
logit_mask[:] = 0
logit_mask[task_labels[prev_task]] = 1.0
# Store the reference gradient
sess.run(model.store_ref_grads, feed_dict={model.x: task_based_memory[prev_task]['images'], model.y_: task_based_memory[prev_task]['labels'],
model.keep_prob: 1.0, model.output_mask: logit_mask, model.train_phase: True})
# Compute the gradient for current task and project if need be
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][classes_adjusted_for_head] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Reset the reference gradients
# Set the mask for all the previous tasks so far
a_gem_logit_mask[:] = 0
for tt in range(task):
logit_mask_offset = tt * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in task_labels[tt]]
a_gem_logit_mask[tt][classes_adjusted_for_head] = 1.0
if KEEP_EPISODIC_MEMORY_FULL:
mem_sample_mask = np.random.choice(episodic_mem_size, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
else:
if episodic_filled_counter <= EPS_MEM_BATCH_SIZE:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.keep_prob: 1.0, model.train_phase: True}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
# Compute the gradient for current task and project if need be
a_gem_logit_mask[:] = 0
logit_mask_offset = task * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in task_labels[task]]
a_gem_logit_mask[task][classes_adjusted_for_head] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
if (iters % 100 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
break_training = 1
break
print('\t\t\t\tTraining for Task%d done!'%(task))
if break_training:
break
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if task < (len(task_labels) - 1):
model.task_updates(sess, task, task_train_images, task_labels[task]) # TODO: For MAS, should the gradients be for current task or all the previous tasks
print('\t\t\t\tTask updates after Task%d done!'%(task))
# If importance method is '*-GEM' then store the episodic memory for the task
if 'GEM' in model.imp_method:
data_to_sample_from = {
'images': task_train_images,
'labels': task_train_labels,
}
if model.imp_method == 'S-GEM':
# Get the important samples from the current task
if is_herding: # Sampling based on MoF
# Compute the features of training data
features_dim = model.image_feature_dim
features = np.zeros([num_train_examples, features_dim])
samples_at_a_time = 32
residual = num_train_examples % samples_at_a_time
for i in range(num_train_examples// samples_at_a_time):
offset = i * samples_at_a_time
features[offset:offset+samples_at_a_time] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+samples_at_a_time],
model.y_: task_train_labels[offset:offset+samples_at_a_time], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if residual > 0:
offset = (i + 1) * samples_at_a_time
features[offset:offset+residual] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+residual],
model.y_: task_train_labels[offset:offset+residual], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
imp_images, imp_labels = sample_from_dataset_icarl(data_to_sample_from, features, task_labels[task], SAMPLES_PER_CLASS)
else: # Random sampling
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
task_memory = {
'images': deepcopy(imp_images),
'labels': deepcopy(imp_labels),
}
task_based_memory.append(task_memory)
elif model.imp_method == 'A-GEM':
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
if KEEP_EPISODIC_MEMORY_FULL:
update_episodic_memory(data_to_sample_from, importance_array, episodic_mem_size, task, episodic_images, episodic_labels)
else:
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
if not KEEP_EPISODIC_MEMORY_FULL: # Fill the memory to always keep M/T samples per task
total_imp_samples = imp_images.shape[0]
eps_offset = task * total_imp_samples
episodic_images[eps_offset:eps_offset+total_imp_samples] = imp_images
episodic_labels[eps_offset:eps_offset+total_imp_samples, head_offset:head_offset+TOTAL_CLASSES] = imp_labels
episodic_filled_counter += total_imp_samples
print('Unique labels in the episodic memory: {}'.format(np.unique(np.nonzero(episodic_labels)[1])))
# Inspect episodic memory
if DEBUG_EPISODIC_MEMORY:
# Which labels are present in the memory
unique_labels = np.unique(np.nonzero(episodic_labels)[-1])
print('Unique labels present in the episodic memory: {}'.format(unique_labels))
print('Labels count:')
for lbl in unique_labels:
print('Label {}: {} samples'.format(lbl, np.where(np.nonzero(episodic_labels)[-1] == lbl)[0].size))
# Is there any space which is not filled
print('Empty space: {}'.format(np.where(np.sum(episodic_labels, axis=1) == 0)))
print('Episodic memory of {} images at task {} saved!'.format(episodic_images.shape[0], task))
# If sampling flag is set, store few of the samples from previous task
if do_sampling:
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones([datasets[task]['train']['images'].shape[0]], dtype=np.float32)
# Get the important samples from the current task
imp_images, imp_labels = sample_from_dataset(datasets[task]['train'], importance_array,
task_labels[task], SAMPLES_PER_CLASS)
if imp_images is not None:
if last_task_x is None:
last_task_x = imp_images
last_task_y_ = imp_labels
else:
last_task_x = np.concatenate((last_task_x, imp_images), axis=0)
last_task_y_ = np.concatenate((last_task_y_, imp_labels), axis=0)
# Delete the importance array now that you don't need it in the current run
del importance_array
print('\t\t\t\tEpisodic memory is saved for Task%d!'%(task))
if cross_validate_mode:
# Only evaluate after the last task
if (task == model.num_tasks - 1) or MULTI_TASK:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, online_cross_val)
elif train_single_epoch:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, False)
print('Task: {} Acc: {}'.format(task, fbatch))
ftask[batch_dim_count] = fbatch
else:
# Multi-epoch training, so compute accuracy at the end
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, online_cross_val)
if SAVE_MODEL_PARAMS:
save(saver, sess, SNAPSHOT_DIR, iters)
if not cross_validate_mode:
# Store the accuracies computed at task T in a list
evals.append(np.array(ftask))
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
if not cross_validate_mode:
runs.append(np.array(evals))
if break_training:
break
# End for loop runid
if cross_validate_mode:
return np.mean(ftask), task_labels_dataset
else:
runs = np.array(runs)
return runs, task_labels_dataset
def test_task_sequence(model, sess, test_data, all_task_labels, task, cross_validate_mode):
"""
Snapshot the current performance
"""
final_acc = np.zeros(model.num_tasks)
test_set = 'test'
if model.imp_method == 'A-GEM':
logit_mask = np.zeros([model.num_tasks, model.total_classes])
else:
logit_mask = np.zeros(model.total_classes)
for tt, labels in enumerate(all_task_labels):
if tt > task:
return final_acc
samples_at_a_time = 10
task_images, task_labels = load_task_specific_data(test_data, labels)
global_class_indices = np.column_stack(np.nonzero(task_labels))
logit_mask_offset = tt * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in labels]
logit_mask[:] = 0
if model.imp_method == 'A-GEM':
logit_mask[tt][classes_adjusted_for_head] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
else:
logit_mask[classes_adjusted_for_head] = 1.0
acc = np.zeros(len(labels))
final_train_labels = np.zeros([samples_at_a_time, model.total_classes])
head_offset = tt * TOTAL_CLASSES
for cli, cls in enumerate(labels):
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
task_test_images = task_images[class_indices]
task_test_labels = task_labels[class_indices]
total_test_samples = task_test_images.shape[0]
total_corrects = 0
if total_test_samples < samples_at_a_time:
i = -1
for i in range(total_test_samples // samples_at_a_time):
offset = i*samples_at_a_time
final_train_labels[:, head_offset:head_offset+TOTAL_CLASSES] = task_test_labels[offset:offset+samples_at_a_time]
feed_dict = {model.x: task_test_images[offset:offset+samples_at_a_time],
model.y_: final_train_labels,
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
total_corrects += np.sum(sess.run(model.correct_predictions[tt], feed_dict=feed_dict))
else:
feed_dict[model.output_mask] = logit_mask
total_corrects += np.sum(sess.run(model.correct_predictions, feed_dict=feed_dict))
# Compute the corrects on residuals
offset = (i+1)*samples_at_a_time
num_residuals = total_test_samples % samples_at_a_time
final_train_labels[:num_residuals, head_offset:head_offset+TOTAL_CLASSES] = task_test_labels[offset:offset+num_residuals]
feed_dict = {model.x: task_test_images[offset:offset+num_residuals],
model.y_: final_train_labels[:num_residuals],
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
total_corrects += np.sum(sess.run(model.correct_predictions[tt], feed_dict=feed_dict))
else:
feed_dict[model.output_mask] = logit_mask
total_corrects += np.sum(sess.run(model.correct_predictions, feed_dict=feed_dict))
# Accuracy
if total_test_samples != 0:
acc[cli] = total_corrects/ float(total_test_samples)
final_acc[tt] = np.mean(acc)
return final_acc
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Initialize the random seed of numpy
np.random.seed(args.random_seed)
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS - K_FOR_CROSS_VAL
# Load the split AWA dataset for all the classes
data_labs = [np.arange(TOTAL_CLASSES)]
datasets = construct_split_awa(data_labs, args.data_dir, AWA_TRAIN_LIST, AWA_VAL_LIST, AWA_TEST_LIST, IMG_HEIGHT, IMG_WIDTH)
if args.cross_validate_mode:
#models_list = MODELS
#learning_rate_list = [0.1, 0.03, 0.01, 0.003, 0.0003]
models_list = [args.imp_method]
learning_rate_list = [0.01]
else:
models_list = [args.imp_method]
for imp_method in models_list:
if imp_method == 'VAN':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.001]
elif imp_method == 'PI':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10]
else:
synap_stgth_list = [1]
learning_rate_list = [0.003]
elif imp_method == 'EWC' or imp_method == 'M-EWC':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [100]
learning_rate_list = [0.003]
elif imp_method == 'MAS':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [1]
learning_rate_list = [0.003]
elif imp_method == 'RWALK':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [10] # Run again
learning_rate_list = [0.003]
elif imp_method == 'S-GEM':
synap_stgth_list = [0]
if args.online_cross_val:
pass
else:
learning_rate_list = [args.learning_rate]
elif imp_method == 'A-GEM':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.01]
for synap_stgth in synap_stgth_list:
for lr in learning_rate_list:
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'ARCH': args.arch,
'DATASET': 'SPLIT_AWA',
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': imp_method,
'SYNAP_STGTH': synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': lr,
'BATCH_SIZE': args.batch_size,
'EPS_MEMORY': args.do_sampling,
'MEM_SIZE': args.mem_size,
'IS_HERDING': args.is_herding}
experiment_id = "SPLIT_AWA_ONE_HOT_HERDING_%r_%s_%r_%s_%s_%s_%s_%r_%s-"%(args.is_herding, args.arch, args.train_single_epoch, imp_method,
str(synap_stgth).replace('.', '_'), str(lr).replace('.', '_'),
str(args.batch_size), args.do_sampling, str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(args.random_seed)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
y_ = tf.placeholder(tf.float32, shape=[None, num_tasks*TOTAL_CLASSES])
if not args.train_single_epoch:
# Define ops for data augmentation
x_aug = image_scaling(x)
x_aug = random_crop_and_pad_image(x_aug, IMG_HEIGHT, IMG_WIDTH)
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=lr)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=lr)
elif args.optim == 'MOMENTUM':
base_lr = tf.constant(lr)
learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - train_step / training_iters), OPT_POWER))
opt = tf.train.MomentumOptimizer(lr, OPT_MOMENTUM)
# Create the model/ construct the graph
if args.train_single_epoch:
# When training using a single epoch then there is no need for data augmentation
model = Model(x, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True)
else:
model = Model(x_aug, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True, x_test=x)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=100)
runs, task_labels_dataset = train_task_sequence(model, sess, saver, datasets, args.cross_validate_mode, args.train_single_epoch,
args.do_sampling, args.is_herding, args.mem_size*CLASSES_PER_TASK*num_tasks, args.train_iters, args.batch_size, args.num_runs, args.init_checkpoint, args.online_cross_val, args.random_seed)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
print('Time spent: {}'.format(time_spent))
# Clean up
del model
if args.cross_validate_mode:
# If cross-validation flag is enabled, store the stuff in a text file
cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_AWA_%s_%s'%(imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
f.write('HERDING: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(args.is_herding, args.arch, lr, synap_stgth, runs))
else:
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
exper_labels = dict(labels=task_labels_dataset)
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
if __name__ == '__main__':
main()
|
agem-main
|
conv_split_awa.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split CUB experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import image_scaling, random_crop_and_pad_image, random_horizontal_flip, construct_split_cub
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory_with_less_data, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, load_task_specific_data
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'VGG', 'RESNET-B']
ARCH = 'RESNET-B'
PRETRAIN = True
## Model options
#MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'M-EWC', 'GEM', 'A-GEM', 'S-GEM'] #List of valid models
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'A-GEM'] #List of valid models
IMP_METHOD = 'PI'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 5 # Number of samples per class kept in the episodic memory
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
TOTAL_CLASSES = 200 # Total number of classes in the dataset
EPS_MEM_BATCH_SIZE = 128
DEBUG_EPISODIC_MEMORY = False
KEEP_EPISODIC_MEMORY_FULL = False
K_FOR_CROSS_VAL = 3
## Logging, saving and testing options
LOG_DIR = './split_cub_results'
SNAPSHOT_DIR = './cub_snapshots/sgd'
SAVE_MODEL_PARAMS = False
## Evaluation options
## Task split
NUM_TASKS = 20
MULTI_TASK = False
## Dataset specific options
DATA_DIR='CUB_data/CUB_200_2011/images'
CUB_TRAIN_LIST = './dataset_lists/CUB_train_list.txt'
CUB_TEST_LIST = './dataset_lists/CUB_test_list.txt'
RESNET18_IMAGENET_CHECKPOINT = './resnet-18-pretrained-imagenet/model.ckpt'
# Define function to load/ store training weights. We will use ImageNet initialization later on
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
The parsed arguments as an argparse.Namespace.
"""
parser = argparse.ArgumentParser(description="Script for split CUB experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--do-sampling", action="store_true",
help="Whether to do sampling")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Number of samples per class from previous tasks.")
parser.add_argument("--is-herding", action="store_true",
help="Herding based sampling")
parser.add_argument("--data-dir", type=str, default=DATA_DIR,
help="Directory from where the CUB data will be read.\
NOTE: Provide path till <CUB_DIR>/images")
parser.add_argument("--init-checkpoint", type=str, default=RESNET18_IMAGENET_CHECKPOINT,
help="Path to TF checkpoint file or npz file containing initialization for ImageNet.\
NOTE: NPZ file for VGG and TF checkpoint for ResNet")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
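# Example invocation (illustrative only; any supported flag combination works):
#   python conv_split_cub.py --train-single-epoch --imp-method A-GEM --arch RESNET-B \
#       --is-herding --mem-size 5 --data-dir CUB_data/CUB_200_2011/images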
def train_task_sequence(model, sess, saver, datasets, cross_validate_mode, train_single_epoch, do_sampling, is_herding,
mem_per_class, train_iters, batch_size, num_runs, init_checkpoint, online_cross_val, random_seed):
"""
Train and evaluate the LLL system such that each example is seen only once
Args:
Returns:
A tuple (accuracies, task_labels_dataset); in cross-validation mode the first element is the mean accuracy after the final task, otherwise it is the per-run accuracy array.
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
break_training = 0
# Loop over number of runs to average over
for runid in range(num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
classes_per_task = TOTAL_CLASSES// NUM_TASKS
total_classes = classes_per_task * model.num_tasks
if online_cross_val:
label_array = np.arange(total_classes)
else:
class_label_offset = K_FOR_CROSS_VAL * classes_per_task
label_array = np.arange(class_label_offset, total_classes+class_label_offset)
np.random.shuffle(label_array)
for tt in range(model.num_tasks):
tt_offset = tt*classes_per_task
task_labels.append(list(label_array[tt_offset:tt_offset+classes_per_task]))
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
# Store the task labels
task_labels_dataset.append(task_labels)
# Set episodic memory size
episodic_mem_size = mem_per_class * total_classes
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
if PRETRAIN:
# Load the variables from a checkpoint
if model.network_arch == 'RESNET-B':
# Define loader (weights which will be loaded from a checkpoint)
restore_vars = [v for v in model.trainable_vars if 'fc' not in v.name]
loader = tf.train.Saver(restore_vars)
load(loader, sess, init_checkpoint)
elif model.network_arch == 'VGG':
# Load the pretrained weights from the npz file
weights = np.load(init_checkpoint)
keys = sorted(weights.keys())
for i, key in enumerate(keys[:-2]): # Load everything except the last layer
sess.run(model.trainable_vars[i].assign(weights[key]))
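# Note: for VGG the last two arrays in the npz (presumably the final fc layer's weight
# and bias) are skipped so the classification head is trained from scratch; for
# RESNET-B the same effect comes from excluding the 'fc' variables above.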
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = []
if model.imp_method == 'S-GEM':
# List to store the episodic memories of the previous tasks
task_based_memory = []
if model.imp_method == 'A-GEM':
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
episodic_filled_counter = 0
a_gem_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
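# Episodic memory layout: episodic_mem_size = mem_per_class * total_classes slots,
# filled task-by-task further below (SAMPLES_PER_CLASS examples per class when
# KEEP_EPISODIC_MEMORY_FULL is False). a_gem_logit_mask keeps one row of
# TOTAL_CLASSES per task so each head can be masked independently.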
if do_sampling:
# List to store important samples from the previous tasks
last_task_x = None
last_task_y_ = None
# Mask for softmax
logit_mask = np.zeros(TOTAL_CLASSES)
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0):
model.restore(sess)
# If sampling flag is set append the previous datasets
if do_sampling:
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
if task > 0:
task_train_images, task_train_labels = concatenate_datasets(task_tr_images, task_tr_labels, last_task_x, last_task_y_)
else:
task_train_images = task_tr_images
task_train_labels = task_tr_labels
else:
# Extract training images and labels for the current task
task_train_images, task_train_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
# If multi_task is set then train using all the datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(task_labels)):
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[t_])
task_train_images = np.concatenate((task_train_images, task_tr_images), axis=0)
task_train_labels = np.concatenate((task_train_labels, task_tr_labels), axis=0)
else:
# Skip training for this task
continue
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
print('Unique labels in the task: {}'.format(np.unique(np.nonzero(task_train_labels)[1])))
# Test for the tasks that we've seen so far
test_labels.extend(task_labels[task])
# Declare variables to store sample importance if sampling flag is set
if do_sampling:
# Get the sample weighting
task_sample_weights = get_sample_weights(task_train_labels, test_labels)
else:
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
logit_mask[:] = 0
# Train a task observing sequence of data
if train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
if cross_validate_mode:
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
else:
num_iters = train_iters
# Set the mask only once before starting the training for the task
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
if MULTI_TASK:
logit_mask[:] = 1.0
# Randomly shuffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
ftask = []
# Training loop for task T
for iters in range(num_iters):
if train_single_epoch and not cross_validate_mode and not MULTI_TASK:
if (iters < 10) or (iters % 5 == 0):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
ftask.append(fbatch)
# Set the output labels over which the model needs to be trained
if model.imp_method == 'A-GEM':
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
else:
logit_mask[:] = 0
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
if train_single_epoch:
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
else:
offset = (iters * batch_size) % (num_train_examples - batch_size)
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'S-GEM':
if task == 0:
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
# Randomly sample a task from the previous tasks
prev_task = np.random.randint(0, task)
# Set the logit mask for the randomly sampled task
logit_mask[:] = 0
logit_mask[task_labels[prev_task]] = 1.0
# Store the reference gradient
sess.run(model.store_ref_grads, feed_dict={model.x: task_based_memory[prev_task]['images'], model.y_: task_based_memory[prev_task]['labels'],
model.keep_prob: 1.0, model.output_mask: logit_mask, model.train_phase: True})
# Compute the gradient for current task and project if need be
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
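# S-GEM uses a single constraint per step: one previous task is sampled at random,
# its stored memory provides the reference gradient, and train_subseq_tasks (as in
# A-GEM) projects the current-task gradient when it conflicts with that reference.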
elif model.imp_method == 'A-GEM':
if task == 0:
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the mask for all the previous tasks so far
a_gem_logit_mask[:] = 0
for tt in range(task):
a_gem_logit_mask[tt][task_labels[tt]] = 1.0
if KEEP_EPISODIC_MEMORY_FULL:
mem_sample_mask = np.random.choice(episodic_mem_size, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
else:
if episodic_filled_counter <= EPS_MEM_BATCH_SIZE:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
# Store the reference gradient
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.keep_prob: 1.0, model.train_phase: True}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
# Compute the gradient for current task and project if need be
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
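# A-GEM projection sketch (for reference; the actual update lives inside the graph
# as model.train_subseq_tasks). With g the current-task gradient and g_ref the
# averaged gradient on the episodic-memory batch, A-GEM applies
#     g_tilde = g - (g . g_ref / g_ref . g_ref) * g_ref    if g . g_ref < 0
#     g_tilde = g                                          otherwise
# i.e. the component of g that would increase the average memory loss is removed.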
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
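# RWALK maintains an online (EMA) Fisher estimate plus a path-integral importance
# score: every fisher_update_after iterations the score accumulated in small_omega
# is consolidated (update_big_omega_riemann), the running Fisher is refreshed, and
# the temporary accumulators are reset before training continues.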
if (iters % 50 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
break_training = 1
break
print('\t\t\t\tTraining for Task%d done!'%(task))
if break_training:
break
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if task < (len(task_labels) - 1):
model.task_updates(sess, task, task_train_images, task_labels[task]) # TODO: For MAS, should the gradients be for current task or all the previous tasks
print('\t\t\t\tTask updates after Task%d done!'%(task))
# If importance method is '*-GEM' then store the episodic memory for the task
if 'GEM' in model.imp_method:
data_to_sample_from = {
'images': task_train_images,
'labels': task_train_labels,
}
if model.imp_method == 'S-GEM':
# Get the important samples from the current task
if is_herding: # Sampling based on MoF
# Compute the features of training data
features_dim = model.image_feature_dim
features = np.zeros([num_train_examples, features_dim])
samples_at_a_time = 32
residual = num_train_examples % samples_at_a_time
for i in range(num_train_examples// samples_at_a_time):
offset = i * samples_at_a_time
features[offset:offset+samples_at_a_time] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+samples_at_a_time],
model.y_: task_train_labels[offset:offset+samples_at_a_time], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if residual > 0:
offset = (i + 1) * samples_at_a_time
features[offset:offset+residual] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+residual],
model.y_: task_train_labels[offset:offset+residual], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
imp_images, imp_labels = sample_from_dataset_icarl(data_to_sample_from, features, task_labels[task], SAMPLES_PER_CLASS)
else: # Random sampling
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
task_memory = {
'images': deepcopy(imp_images),
'labels': deepcopy(imp_labels),
}
task_based_memory.append(task_memory)
elif model.imp_method == 'A-GEM':
if is_herding: # Sampling based on MoF
# Compute the features of training data
features_dim = model.image_feature_dim
features = np.zeros([num_train_examples, features_dim])
samples_at_a_time = 32
residual = num_train_examples % samples_at_a_time
for i in range(num_train_examples// samples_at_a_time):
offset = i * samples_at_a_time
features[offset:offset+samples_at_a_time] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+samples_at_a_time],
model.y_: task_train_labels[offset:offset+samples_at_a_time], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if residual > 0:
offset = (i + 1) * samples_at_a_time
features[offset:offset+residual] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+residual],
model.y_: task_train_labels[offset:offset+residual], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if KEEP_EPISODIC_MEMORY_FULL:
update_episodic_memory(data_to_sample_from, features, episodic_mem_size, task, episodic_images, episodic_labels, task_labels=task_labels[task], is_herding=True)
else:
imp_images, imp_labels = sample_from_dataset_icarl(data_to_sample_from, features, task_labels[task], SAMPLES_PER_CLASS)
else: # Random sampling
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
if KEEP_EPISODIC_MEMORY_FULL:
update_episodic_memory(data_to_sample_from, importance_array, episodic_mem_size, task, episodic_images, episodic_labels)
else:
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
if not KEEP_EPISODIC_MEMORY_FULL: # Fill the memory to always keep M/T samples per task
total_imp_samples = imp_images.shape[0]
eps_offset = task * total_imp_samples
episodic_images[eps_offset:eps_offset+total_imp_samples] = imp_images
episodic_labels[eps_offset:eps_offset+total_imp_samples] = imp_labels
episodic_filled_counter += total_imp_samples
print('Unique labels in the episodic memory: {}'.format(np.unique(np.nonzero(episodic_labels)[1])))
# Inspect episodic memory
if DEBUG_EPISODIC_MEMORY:
# Which labels are present in the memory
unique_labels = np.unique(np.nonzero(episodic_labels)[-1])
print('Unique labels present in the episodic memory: {}'.format(unique_labels))
print('Labels count:')
for lbl in unique_labels:
print('Label {}: {} samples'.format(lbl, np.where(np.nonzero(episodic_labels)[-1] == lbl)[0].size))
# Is there any space which is not filled
print('Empty space: {}'.format(np.where(np.sum(episodic_labels, axis=1) == 0)))
print('Episodic memory of {} images at task {} saved!'.format(episodic_images.shape[0], task))
# If sampling flag is set, store few of the samples from previous task
if do_sampling:
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones([task_train_images.shape[0]], dtype=np.float32)
# Get the important samples from the current task
task_data = {
'images': task_tr_images,
'labels': task_tr_labels,
}
imp_images, imp_labels = sample_from_dataset(task_data, importance_array, task_labels[task], SAMPLES_PER_CLASS)
if imp_images is not None:
if last_task_x is None:
last_task_x = imp_images
last_task_y_ = imp_labels
else:
last_task_x = np.concatenate((last_task_x, imp_images), axis=0)
last_task_y_ = np.concatenate((last_task_y_, imp_labels), axis=0)
# Delete the importance array now that you don't need it in the current run
del importance_array
print('\t\t\t\tEpisodic memory is saved for Task%d!'%(task))
if cross_validate_mode:
# Only evaluate after the last task
if (task == model.num_tasks - 1) or MULTI_TASK:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
elif train_single_epoch:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
print('Task: {} Acc: {}'.format(task, fbatch))
ftask.append(fbatch)
else:
# Multi-epoch training, so compute accuracy at the end
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
if SAVE_MODEL_PARAMS:
save(saver, sess, SNAPSHOT_DIR, iters)
if not cross_validate_mode:
# Store the accuracies computed at task T in a list
evals.append(np.array(ftask))
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
if not cross_validate_mode:
runs.append(np.array(evals))
if break_training:
break
# End for loop runid
if cross_validate_mode:
return np.mean(ftask), task_labels_dataset
else:
runs = np.array(runs)
return runs, task_labels_dataset
def test_task_sequence(model, sess, test_data, test_tasks, task):
"""
Snapshot the current performance
"""
final_acc = np.zeros(model.num_tasks)
if model.imp_method == 'A-GEM':
logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
else:
logit_mask = np.zeros(TOTAL_CLASSES)
for tt, labels in enumerate(test_tasks):
if not MULTI_TASK:
if tt > task:
return final_acc
task_test_images, task_test_labels = load_task_specific_data(test_data, labels)
total_test_samples = task_test_images.shape[0]
samples_at_a_time = 10
total_corrects = 0
logit_mask[:] = 0
if model.imp_method == 'A-GEM':
logit_mask[tt][labels] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
else:
logit_mask[labels] = 1.0
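# Masking the logits to the labels of task tt gives a multi-head evaluation:
# predictions (and hence accuracy) are computed only over the classes that belong
# to the task being tested.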
for i in range(total_test_samples // samples_at_a_time):
offset = i*samples_at_a_time
feed_dict = {model.x: task_test_images[offset:offset+samples_at_a_time],
model.y_: task_test_labels[offset:offset+samples_at_a_time],
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
total_corrects += np.sum(sess.run(model.correct_predictions[tt], feed_dict=feed_dict))
else:
feed_dict[model.output_mask] = logit_mask
total_corrects += np.sum(sess.run(model.correct_predictions, feed_dict=feed_dict))
# Compute the corrects on residuals
offset = (i+1)*samples_at_a_time
num_residuals = total_test_samples % samples_at_a_time
feed_dict = {model.x: task_test_images[offset:offset+num_residuals],
model.y_: task_test_labels[offset:offset+num_residuals],
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
total_corrects += np.sum(sess.run(model.correct_predictions[tt], feed_dict=feed_dict))
else:
feed_dict[model.output_mask] = logit_mask
total_corrects += np.sum(sess.run(model.correct_predictions, feed_dict=feed_dict))
# Mean accuracy on the task
acc = total_corrects/ float(total_test_samples)
final_acc[tt] = acc
return final_acc
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS - K_FOR_CROSS_VAL
# Load the split CUB dataset
data_labs = [np.arange(TOTAL_CLASSES)]
datasets = construct_split_cub(data_labs, args.data_dir, CUB_TRAIN_LIST, CUB_TEST_LIST, IMG_HEIGHT, IMG_WIDTH)
if args.cross_validate_mode:
#models_list = MODELS
#learning_rate_list = [0.3, 0.1, 0.01, 0.003, 0.001]
models_list = [args.imp_method]
learning_rate_list = [0.03]
else:
models_list = [args.imp_method]
for imp_method in models_list:
if imp_method == 'VAN':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.03]
elif imp_method == 'PI':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10]
else:
synap_stgth_list = [0.1]
learning_rate_list = [0.03]
elif imp_method == 'EWC' or imp_method == 'M-EWC':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [1]
learning_rate_list = [0.03]
elif imp_method == 'MAS':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [0.1]
learning_rate_list = [0.03]
elif imp_method == 'RWALK':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [1]
learning_rate_list = [0.03]
elif imp_method == 'S-GEM':
synap_stgth_list = [0]
if args.online_cross_val:
pass
else:
learning_rate_list = [args.learning_rate]
elif imp_method == 'A-GEM':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.03]
for synap_stgth in synap_stgth_list:
for lr in learning_rate_list:
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'ARCH': args.arch,
'DATASET': 'SPLIT_CUB',
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': imp_method,
'SYNAP_STGTH': synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': lr,
'BATCH_SIZE': args.batch_size,
'EPS_MEMORY': args.do_sampling,
'MEM_SIZE': args.mem_size,
'IS_HERDING': args.is_herding}
experiment_id = "SPLIT_CUB_ONE_HOT_HERDING_%r_%s_%r_%s_%s_%s_%s_%r_%s-"%(args.is_herding, args.arch, args.train_single_epoch, imp_method,
str(synap_stgth).replace('.', '_'), str(lr).replace('.', '_'),
str(args.batch_size), args.do_sampling, str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Reset the default graph
#tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(RANDOM_SEED)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])
if not args.train_single_epoch:
# Define ops for data augmentation
x_aug = image_scaling(x)
x_aug = random_crop_and_pad_image(x_aug, IMG_HEIGHT, IMG_WIDTH)
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=lr)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=lr)
elif args.optim == 'MOMENTUM':
base_lr = tf.constant(lr)
learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - train_step / training_iters), OPT_POWER))
opt = tf.train.MomentumOptimizer(lr, OPT_MOMENTUM)
# Create the model/ construct the graph
if args.train_single_epoch:
# When training using a single epoch then there is no need for data augmentation
model = Model(x, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True)
else:
model = Model(x_aug, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True, x_test=x)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=100)
runs, task_labels_dataset = train_task_sequence(model, sess, saver, datasets, args.cross_validate_mode, args.train_single_epoch,
args.do_sampling, args.is_herding, args.mem_size, args.train_iters, args.batch_size, args.num_runs, args.init_checkpoint, args.online_cross_val, args.random_seed)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
print('Time spent: {}'.format(time_spent))
# Clean up
del model
if args.cross_validate_mode:
# If cross-validation flag is enabled, store the stuff in a text file
cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_CUB_%s_%s'%(imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
f.write('HERDING: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(args.is_herding, args.arch, lr, synap_stgth, runs))
else:
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
exper_labels = dict(labels=task_labels_dataset)
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
if __name__ == '__main__':
main()
|
agem-main
|
conv_split_cub.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split CIFAR 100 experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import construct_split_cifar
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data
from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'RESNET-S', 'RESNET-B', 'VGG']
ARCH = 'RESNET-S'
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'M-EWC', 'S-GEM', 'A-GEM', 'FTR_EXT', 'PNN', 'ER'] #List of valid models
IMP_METHOD = 'EWC'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 13
IMG_HEIGHT = 32
IMG_WIDTH = 32
IMG_CHANNELS = 3
TOTAL_CLASSES = 100 # Total number of classes in the dataset
VISUALIZE_IMPORTANCE_MEASURE = False
MEASURE_CONVERGENCE_AFTER = 0.9
EPS_MEM_BATCH_SIZE = 256
DEBUG_EPISODIC_MEMORY = False
K_FOR_CROSS_VAL = 3
TIME_MY_METHOD = False
COUNT_VIOLATONS = False
MEASURE_PERF_ON_EPS_MEMORY = False
## Logging, saving and testing options
LOG_DIR = './split_cifar_results'
RESNET18_CIFAR10_CHECKPOINT = './resnet-18-pretrained-cifar10/model.ckpt-19999'
## Evaluation options
## Task split
NUM_TASKS = 20
MULTI_TASK = False
# Define functions to load/ store training weights. A pretrained initialization (see RESNET18_CIFAR10_CHECKPOINT above) can be restored via load() if desired
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
The parsed arguments as an argparse.Namespace.
"""
parser = argparse.ArgumentParser(description="Script for split cifar experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Total size of episodic memory.")
parser.add_argument("--eps-mem-batch", type=int, default=EPS_MEM_BATCH_SIZE,
help="Number of samples per class from previous tasks.")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
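# Example invocation (illustrative only; any supported flag combination works):
#   python conv_split_cifar.py --train-single-epoch --imp-method ER --arch RESNET-S \
#       --mem-size 13 --eps-mem-batch 256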
def train_task_sequence(model, sess, datasets, args):
"""
Train and evaluate the LLL system such that each example is seen only once
Args:
Returns:
dict A dictionary containing mean and stds for the experiment
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
if model.imp_method == 'A-GEM' or model.imp_method == 'ER':
use_episodic_memory = True
else:
use_episodic_memory = False
batch_size = args.batch_size
# Loop over number of runs to average over
for runid in range(args.num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(args.random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
classes_per_task = TOTAL_CLASSES// NUM_TASKS
total_classes = classes_per_task * model.num_tasks
if args.online_cross_val:
label_array = np.arange(total_classes)
else:
class_label_offset = K_FOR_CROSS_VAL * classes_per_task
label_array = np.arange(class_label_offset, total_classes+class_label_offset)
np.random.shuffle(label_array)
for tt in range(model.num_tasks):
tt_offset = tt*classes_per_task
task_labels.append(list(label_array[tt_offset:tt_offset+classes_per_task]))
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
# Store the task labels
task_labels_dataset.append(task_labels)
# Set episodic memory size
episodic_mem_size = args.mem_size * total_classes
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = []
if use_episodic_memory:
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
episodic_filled_counter = 0
nd_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
count_cls = np.zeros(TOTAL_CLASSES, dtype=np.int32)
episodic_filled_counter = 0
examples_seen_so_far = 0
# Mask for softmax
logit_mask = np.zeros(TOTAL_CLASSES)
if COUNT_VIOLATONS:
violation_count = np.zeros(model.num_tasks)
vc = 0
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0 and model.imp_method != 'PNN'):
model.restore(sess)
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=bool)
pnn_train_phase[task] = True
pnn_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
# If not in the cross validation mode then concatenate the train and validation sets
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
task_val_images, task_val_labels = load_task_specific_data(datasets[0]['validation'], task_labels[task])
task_train_images, task_train_labels = concatenate_datasets(task_tr_images, task_tr_labels, task_val_images, task_val_labels)
# If multi_task is set then train using all the datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(task_labels)):
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[t_])
task_train_images = np.concatenate((task_train_images, task_tr_images), axis=0)
task_train_labels = np.concatenate((task_train_labels, task_tr_labels), axis=0)
else:
# Skip training for this task
continue
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
print('Unique labels in the task: {}'.format(np.unique(np.nonzero(task_train_labels)[1])))
# Test for the tasks that we've seen so far
test_labels += task_labels[task]
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
logit_mask[:] = 0
# Train a task observing sequence of data
if args.train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
if args.cross_validate_mode:
logit_mask[task_labels[task]] = 1.0
else:
num_iters = args.train_iters
# Set the mask only once before starting the training for the task
logit_mask[task_labels[task]] = 1.0
if MULTI_TASK:
logit_mask[:] = 1.0
# Randomly shuffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
ftask = []
# Number of iterations after which convergence is checked
convergence_iters = int(num_iters * MEASURE_CONVERGENCE_AFTER)
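# convergence_iters marks the point (MEASURE_CONVERGENCE_AFTER, i.e. 90% of the
# iterations) after which M-EWC additionally accumulates the path-integral
# importance; see the M-EWC branch in the training loop below.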
# Training loop for task T
for iters in range(num_iters):
if args.train_single_epoch and not args.cross_validate_mode and not MULTI_TASK:
if (iters <= 20) or (iters > 20 and iters % 50 == 0):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
ftask.append(fbatch)
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
pnn_logit_mask[:] = 0
pnn_logit_mask[task][task_labels[task]] = 1.0
elif model.imp_method == 'A-GEM':
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
else:
# Set the output labels over which the model needs to be trained
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
if args.train_single_epoch:
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
if model.imp_method == 'PNN':
feed_dict = {model.x: train_x[offset:offset+residual], model.y_[task]: train_y[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
else:
offset = (iters * batch_size) % (num_train_examples - batch_size)
if model.imp_method == 'PNN':
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_[task]: train_y[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PNN':
_, loss = sess.run([model.train[task], model.unweighted_entropy[task]], feed_dict=feed_dict)
elif model.imp_method == 'FTR_EXT':
feed_dict[model.output_mask] = logit_mask
if task == 0:
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_classifier, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC' or model.imp_method == 'M-EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
if (iters >= convergence_iters) and (model.imp_method == 'M-EWC'):
_, _, _, _, loss = sess.run([model.weights_old_ops_grouped, model.set_tmp_fisher, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
else:
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the mask for all the previous tasks so far
nd_logit_mask[:] = 0
for tt in range(task):
nd_logit_mask[tt][task_labels[tt]] = 1.0
if episodic_filled_counter <= args.eps_mem_batch:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, args.eps_mem_batch, replace=False) # Sample without replacement so that we don't sample an example more than once
# Store the reference gradient
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.keep_prob: 1.0, model.train_phase: True}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
# Compute the gradient for current task and project if need be
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
if COUNT_VIOLATONS:
vc, _, loss = sess.run([model.violation_count, model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
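# Sketch of the A-GEM projection that model.train_subseq_tasks is expected to apply
# (per the A-GEM paper), with g the flattened current-task gradient and g_ref the
# reference gradient computed above on the episodic-memory sample:
#   if np.dot(g, g_ref) < 0:
#       g = g - (np.dot(g, g_ref) / np.dot(g_ref, g_ref)) * g_ref
# i.e. the update is only altered when it would increase the average memory loss.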
# Put the batch in the ring buffer
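# Memory layout used below: each task owns a contiguous block of
# args.mem_size * classes_per_task slots (episodic_filled_counter marks the start of
# the current task's block); within that block every class gets args.mem_size slots,
# and count_cls[cls] cycles through them so the oldest example of a class is
# overwritten first.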
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
cls = np.unique(np.nonzero(er_y_))[-1]
# Write the example at the location pointed by count_cls[cls]
cls_to_index_map = np.where(np.array(task_labels[task]) == cls)[0][0]
with_in_task_offset = args.mem_size * cls_to_index_map
mem_index = count_cls[cls] + with_in_task_offset + episodic_filled_counter
episodic_images[mem_index] = er_x
episodic_labels[mem_index] = er_y_
count_cls[cls] = (count_cls[cls] + 1) % args.mem_size
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
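# RWalk keeps two quantities: a path-integral importance accumulated every step
# ("small omega", reset below) and the running Fisher. update_big_omega_riemann is
# assumed to turn the accumulated loss change into a parameter score by normalizing
# with the Fisher-weighted distance the weights have moved, i.e. an approximate
# distance in the Riemannian manifold induced by the Fisher metric (see the RWalk paper).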
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'ER':
mem_filled_so_far = examples_seen_so_far if (examples_seen_so_far < episodic_mem_size) else episodic_mem_size
if mem_filled_so_far < args.eps_mem_batch:
er_mem_indices = np.arange(mem_filled_so_far)
else:
er_mem_indices = np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
nd_logit_mask[:] = 0
for tt in range(task+1):
nd_logit_mask[tt][task_labels[tt]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0)
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = float(er_train_x_batch.shape[0])
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
# Reservoir update
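# Sketch of standard reservoir sampling, which update_reservior is assumed to
# implement for a buffer of size M after t examples have been seen:
#   if t < M:
#       buffer[t] = x
#   else:
#       j = np.random.randint(0, t + 1)
#       if j < M:
#           buffer[j] = x
# so every example seen so far remains in the buffer with probability M / t.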
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
update_reservior(er_x, er_y_, episodic_images, episodic_labels, episodic_mem_size, examples_seen_so_far)
examples_seen_so_far += 1
if (iters % 100 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
sys.exit(1)  # Non-zero exit status signals the NaN failure
print('\t\t\t\tTraining for Task%d done!'%(task))
if use_episodic_memory:
episodic_filled_counter += args.mem_size * classes_per_task
if model.imp_method == 'A-GEM':
if COUNT_VIOLATONS:
violation_count[task] = vc
print('Task {}: Violation Count: {}'.format(task, violation_count))
sess.run(model.reset_violation_count, feed_dict=feed_dict)
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if (task < (len(task_labels) - 1)) or MEASURE_PERF_ON_EPS_MEMORY:
model.task_updates(sess, task, task_train_images, task_labels[task]) # TODO: For MAS, should the gradients be for current task or all the previous tasks
print('\t\t\t\tTask updates after Task%d done!'%(task))
if VISUALIZE_IMPORTANCE_MEASURE:
if runid == 0:
for i in range(len(model.fisher_diagonal_at_minima)):
if i == 0:
flatten_fisher = np.array(model.fisher_diagonal_at_minima[i].eval()).flatten()
else:
flatten_fisher = np.concatenate((flatten_fisher,
np.array(model.fisher_diagonal_at_minima[i].eval()).flatten()))
#flatten_fisher [flatten_fisher > 0.1] = 0.1
if args.train_single_epoch:
plot_histogram(flatten_fisher, 100, '/private/home/arslanch/Dropbox/LLL_experiments/Single_Epoch/importance_vis/single_epoch/m_ewc/hist_fisher_task%s.png'%(task))
else:
plot_histogram(flatten_fisher, 100, '/private/home/arslanch/Dropbox/LLL_experiments/Single_Epoch/importance_vis/single_epoch/m_ewc/hist_fisher_task%s.png'%(task))
if args.train_single_epoch and not args.cross_validate_mode:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
print('Task: {}, Acc: {}'.format(task, fbatch))
ftask.append(fbatch)
ftask = np.array(ftask)
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
pnn_logit_mask[:] = 0
pnn_logit_mask[task][task_labels[task]] = 1.0
else:
if MEASURE_PERF_ON_EPS_MEMORY:
eps_mem = {
'images': episodic_images,
'labels': episodic_labels,
}
# Measure perf on episodic memory
ftask = test_task_sequence(model, sess, eps_mem, task_labels, task, classes_per_task=classes_per_task)
else:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task)
print('Task: {}, Acc: {}'.format(task, ftask))
# Store the accuracies computed at task T in a list
evals.append(ftask)
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
runs.append(np.array(evals))
# End for loop runid
runs = np.array(runs)
return runs, task_labels_dataset
def test_task_sequence(model, sess, test_data, test_tasks, task, classes_per_task=0):
"""
Snapshot the current performance
"""
if TIME_MY_METHOD:
# Only compute the training time
return np.zeros(model.num_tasks)
final_acc = np.zeros(model.num_tasks)
if model.imp_method == 'PNN' or model.imp_method == 'A-GEM' or model.imp_method == 'ER':
logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
else:
logit_mask = np.zeros(TOTAL_CLASSES)
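# Multi-head evaluation: PNN/A-GEM/ER use one mask row per task so only the logits of
# task tt's classes are active when evaluating task tt; the remaining methods share a
# single output head and use one flat mask over TOTAL_CLASSES.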
if MEASURE_PERF_ON_EPS_MEMORY:
for tt, labels in enumerate(test_tasks):
# Multi-head evaluation setting
logit_mask[:] = 0
logit_mask[labels] = 1.0
mem_offset = tt*SAMPLES_PER_CLASS*classes_per_task
feed_dict = {model.x: test_data['images'][mem_offset:mem_offset+SAMPLES_PER_CLASS*classes_per_task],
model.y_: test_data['labels'][mem_offset:mem_offset+SAMPLES_PER_CLASS*classes_per_task], model.keep_prob: 1.0, model.train_phase: False, model.output_mask: logit_mask}
acc = model.accuracy.eval(feed_dict = feed_dict)
final_acc[tt] = acc
return final_acc
for tt, labels in enumerate(test_tasks):
if not MULTI_TASK:
if tt > task:
return final_acc
task_test_images, task_test_labels = load_task_specific_data(test_data, labels)
if model.imp_method == 'PNN':
pnn_train_phase = np.zeros(model.num_tasks, dtype=bool)
logit_mask[:] = 0
logit_mask[tt][labels] = 1.0
feed_dict = {model.x: task_test_images,
model.y_[tt]: task_test_labels, model.keep_prob: 1.0}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
acc = model.accuracy[tt].eval(feed_dict = feed_dict)
elif model.imp_method == 'A-GEM' or model.imp_method == 'ER':
logit_mask[:] = 0
logit_mask[tt][labels] = 1.0
feed_dict = {model.x: task_test_images,
model.y_: task_test_labels, model.keep_prob: 1.0, model.train_phase: False}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
feed_dict.update(logit_mask_dict)
acc = model.accuracy[tt].eval(feed_dict = feed_dict)
else:
logit_mask[:] = 0
logit_mask[labels] = 1.0
feed_dict = {model.x: task_test_images,
model.y_: task_test_labels, model.keep_prob: 1.0, model.train_phase: False, model.output_mask: logit_mask}
acc = model.accuracy.eval(feed_dict = feed_dict)
final_acc[tt] = acc
return final_acc
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'ARCH': args.arch,
'DATASET': 'SPLIT_CIFAR',
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': args.imp_method,
'SYNAP_STGTH': args.synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': args.learning_rate,
'BATCH_SIZE': args.batch_size,
'MEM_SIZE': args.mem_size}
experiment_id = "SPLIT_CIFAR_HERDING_%s_%r_%s_%s_%s_%s_%s-"%(args.arch, args.train_single_epoch, args.imp_method,
str(args.synap_stgth).replace('.', '_'), str(args.learning_rate).replace('.', '_'),
str(args.batch_size), str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Get the task labels from the total number of tasks and full label space
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS - K_FOR_CROSS_VAL
# Load the split cifar dataset
data_labs = [np.arange(TOTAL_CLASSES)]
datasets = construct_split_cifar(data_labs)
# Variables to store the accuracies and standard deviations of the experiment
acc_mean = dict()
acc_std = dict()
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(args.random_seed)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
if args.imp_method == 'PNN':
y_ = []
for i in range(num_tasks):
y_.append(tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES]))
else:
y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
elif args.optim == 'MOMENTUM':
base_lr = tf.constant(args.learning_rate)
learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - train_step / training_iters), OPT_POWER))
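# Note: the polynomial-decay learning rate computed above is not passed to the
# optimizer below (MomentumOptimizer is constructed with args.learning_rate), and
# train_step / training_iters are not defined in this scope.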
opt = tf.train.MomentumOptimizer(args.learning_rate, OPT_MOMENTUM)
# Create the Model / construct the graph
model = Model(x, y_, num_tasks, opt, args.imp_method, args.synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
runs, task_labels_dataset = train_task_sequence(model, sess, datasets, args)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
exper_labels = dict(labels=task_labels_dataset)
# If cross-validation flag is enabled, store the stuff in a text file
if args.cross_validate_mode:
acc_mean, acc_std = average_acc_stats_across_runs(runs, model.imp_method)
fgt_mean, fgt_std = average_fgt_stats_across_runs(runs, model.imp_method)
cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_CIFAR_%s_%s'%(args.imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
if MULTI_TASK:
f.write('ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(args.arch, args.learning_rate, args.synap_stgth, acc_mean[-1,:].mean()))
else:
f.write('ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {} \t Fgt: {} \t Time: {}\n'.format(args.arch, args.learning_rate,
args.synap_stgth, acc_mean, fgt_mean, str(time_spent)))
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
if __name__ == '__main__':
main()
|
agem-main
|
conv_split_cifar.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split AWA experiment with hybrid learning.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import random
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import image_scaling, random_crop_and_pad_image, random_horizontal_flip, construct_split_awa
from utils.utils import get_sample_weights, sample_from_dataset, concatenate_datasets, update_episodic_memory, samples_for_each_class, sample_from_dataset_icarl, load_task_specific_data, load_task_specific_data_in_proportion
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'VGG', 'RESNET-B']
ARCH = 'RESNET-B'
PRETRAIN = False
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'A-GEM'] #List of valid models
IMP_METHOD = 'VAN'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 20 # Number of samples per class
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
TOTAL_CLASSES = 50 # Total number of classes in the dataset
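# Fraction of a task's training iterations after which the M-EWC convergence ops are
# triggered (used to compute convergence_iters in train_task_sequence)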
MEASURE_CONVERGENCE_AFTER = 0.9
EPS_MEM_BATCH_SIZE = 128
DEBUG_EPISODIC_MEMORY = False
KEEP_EPISODIC_MEMORY_FULL = False
K_FOR_CROSS_VAL = 3
CLASSES_PER_TASK = 5
## Logging, saving and testing options
LOG_DIR = './split_awa_results'
SNAPSHOT_DIR = './awa_snapshots'
SAVE_MODEL_PARAMS = False
RESNET18_IMAGENET_CHECKPOINT = './resnet-18-pretrained-imagenet/model.ckpt'
## Evaluation options
## Task split
NUM_TASKS = 20
MULTI_TASK = False
## Dataset specific options
ATTR_DIMS = 85
DATA_DIR= './AWA_data/Animals_with_Attributes2/'
AWA_TRAIN_LIST = './dataset_lists/AWA_train_list.txt'
AWA_VAL_LIST = './dataset_lists/AWA_val_list.txt'
AWA_TEST_LIST = './dataset_lists/AWA_test_list.txt'
#AWA_TRAIN_LIST = './dataset_lists/tmp_list_awa.txt'
#AWA_VAL_LIST = './dataset_lists/tmp_list_awa.txt'
#AWA_TEST_LIST = './dataset_lists/tmp_list_awa.txt'
AWA_ATTR_LIST = 'dataset_lists/AWA_attr_in_order.pickle'
# Define functions to load/store training weights. We will use ImageNet initialization later on.
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Script for split AWA Hybrid experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--set-hybrid", action="store_true",
help="If option is chosen then train using hybrid model")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--do-sampling", action="store_true",
help="Whether to do sampling")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Number of samples per class from previous tasks.")
parser.add_argument("--is-herding", action="store_true",
help="Herding based sampling")
parser.add_argument("--data-dir", type=str, default=DATA_DIR,
help="Directory from where the AWA data will be read.\
NOTE: Provide path till <AWA_DIR>/Animals_with_Attributes2")
parser.add_argument("--init-checkpoint", type=str, default=RESNET18_IMAGENET_CHECKPOINT,
help="TF checkpoint file containing initialization for ImageNet.\
NOTE: NPZ file for VGG and TF Checkpoint for ResNet")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
def train_task_sequence(model, sess, saver, datasets, class_attr, num_classes_per_task, cross_validate_mode, train_single_epoch, do_sampling, is_herding,
episodic_mem_size, train_iters, batch_size, num_runs, init_checkpoint, online_cross_val, random_seed):
"""
Train and evaluate LLL system such that we only see an example once
Args:
Returns:
    Mean accuracy in cross-validation mode; otherwise (runs, task_labels_dataset)
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
break_training = 0
# Loop over number of runs to average over
for runid in range(num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(random_seed+runid)
random.seed(random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
classes_per_task = num_classes_per_task
classes_appearing_in_tasks = dict()
for cls in range(TOTAL_CLASSES):
classes_appearing_in_tasks[cls] = 0
if online_cross_val:
label_array = np.arange(TOTAL_CLASSES)
for tt in range(model.num_tasks):
offset = tt * classes_per_task
task_labels.append(list(label_array[offset:offset+classes_per_task]))
else:
for tt in range(model.num_tasks):
task_labels.append(random.sample(range(K_FOR_CROSS_VAL*classes_per_task, TOTAL_CLASSES), classes_per_task))
for lab in task_labels[tt]:
classes_appearing_in_tasks[lab] += 1
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
print('Class frequency in Tasks: {}'.format(classes_appearing_in_tasks))
# Store the task labels
task_labels_dataset.append(task_labels)
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
if PRETRAIN:
# Load the variables from a checkpoint
if model.network_arch == 'RESNET-B':
# Define loader (weights which will be loaded from a checkpoint)
restore_vars = [v for v in model.trainable_vars if 'fc' not in v.name and 'attr_embed' not in v.name]
loader = tf.train.Saver(restore_vars)
load(loader, sess, init_checkpoint)
elif model.network_arch == 'VGG':
# Load the pretrained weights from the npz file
weights = np.load(init_checkpoint)
keys = sorted(weights.keys())
for i, key in enumerate(keys[:-2]): # Load everything except the last layer
sess.run(model.trainable_vars[i].assign(weights[key]))
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
if model.imp_method == 'S-GEM':
# List to store the episodic memories of the previous tasks
task_based_memory = []
if model.imp_method == 'A-GEM':
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, model.num_tasks*TOTAL_CLASSES])
episodic_filled_counter = 0
a_gem_logit_mask = np.zeros([model.num_tasks, model.total_classes])
# Labels for all the tasks that we have seen in the past
prev_task_labels = []
prev_class_attrs = np.zeros([model.total_classes, class_attr.shape[1]])
if do_sampling:
# List to store important samples from the previous tasks
last_task_x = None
last_task_y_ = None
# Mask for softmax
logit_mask = np.zeros(model.total_classes)
max_batch_dimension = 500
# Dict to store the number of times a class has already been seen in the training
class_seen_already = dict()
for cls in range(TOTAL_CLASSES):
class_seen_already[cls] = 0
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0):
model.restore(sess)
# Increment the class seen count
for cls in task_labels[task]:
class_seen_already[cls] += 1
task_train_images, task_train_labels = load_task_specific_data_in_proportion(datasets[0]['train'], task_labels[task], classes_appearing_in_tasks, class_seen_already)
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
print('Unique labels in the task: {}'.format(np.unique(np.nonzero(task_train_labels)[1])))
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
# Train a task observing sequence of data
logit_mask[:] = 0
if train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
else:
num_iters = train_iters
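# Joint multi-head output: the output layer spans num_tasks * TOTAL_CLASSES logits
# (see the y_ placeholder in main), so each task owns a TOTAL_CLASSES-wide block and
# the current task's labels and logit mask are shifted into that block below.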
logit_mask_offset = task * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in task_labels[task]]
logit_mask[classes_adjusted_for_head] = 1.0
# Randomly shuffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
if cross_validate_mode:
# Because we will evaluate at the end
ftask = 0
elif train_single_epoch:
# Because we will evaluate after every mini-batch of every task
ftask = np.zeros([max_batch_dimension+1, model.num_tasks])
batch_dim_count = 0
else:
# Multi-epoch because we will evaluate after every task
ftask = []
# Attribute mask
masked_class_attrs = np.zeros([model.total_classes, class_attr.shape[1]])
masked_class_attrs[classes_adjusted_for_head] = class_attr[task_labels[task]]
#masked_class_attrs[task_labels[task]] = class_attr[task_labels[task]]
# Number of iterations after which convergence is checked
convergence_iters = int(num_iters * MEASURE_CONVERGENCE_AFTER)
final_train_labels = np.zeros([batch_size, model.total_classes])
head_offset = task * TOTAL_CLASSES
# Training loop for task T
for iters in range(num_iters):
if train_single_epoch and not cross_validate_mode:
if (iters < 11):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets[0]['test'], class_attr, num_classes_per_task, task_labels, task, online_cross_val)
ftask[batch_dim_count] = fbatch
# Increment the batch_dim_count
batch_dim_count += 1
# Set the output labels over which the model needs to be trained
if model.imp_method == 'A-GEM':
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][classes_adjusted_for_head] = 1.0
else:
logit_mask[:] = 0
logit_mask[classes_adjusted_for_head] = 1.0
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
final_train_labels[:residual, head_offset:head_offset+TOTAL_CLASSES] = train_y[offset:offset+residual]
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: final_train_labels[:residual],
model.class_attr: masked_class_attrs,
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC' or model.imp_method == 'M-EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
if (iters >= convergence_iters) and (model.imp_method == 'M-EWC'):
_, _, _, _, loss = sess.run([model.weights_old_ops_grouped, model.set_tmp_fisher, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
else:
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'S-GEM':
if task == 0:
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
# Randomly sample a task from the previous tasks
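# S-GEM differs from A-GEM here: instead of averaging a reference gradient over a
# memory drawn from all past tasks, it picks one past task at random and uses the
# gradient on that task's stored memory as the projection reference.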
prev_task = np.random.randint(0, task)
# Set the logit mask for the randomly sampled task
logit_mask[:] = 0
logit_mask[task_labels[prev_task]] = 1.0
prev_class_attrs = np.zeros_like(class_attr)
if online_cross_val:
attr_offset = prev_task * num_classes_per_task
else:
attr_offset = (prev_task + K_FOR_CROSS_VAL) * num_classes_per_task
prev_class_attrs[attr_offset:attr_offset+num_classes_per_task] = class_attr[attr_offset:attr_offset+num_classes_per_task]
# Store the reference gradient
sess.run(model.store_ref_grads, feed_dict={model.x: task_based_memory[prev_task]['images'], model.y_: task_based_memory[prev_task]['labels'],
model.class_attr: prev_class_attrs,
model.keep_prob: 1.0, model.output_mask: logit_mask, model.train_phase: True})
# Compute the gradient for current task and project if need be
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][classes_adjusted_for_head] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the mask for all the previous tasks so far
a_gem_logit_mask[:] = 0
for tt in range(task):
logit_mask_offset = tt * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in task_labels[tt]]
a_gem_logit_mask[tt][classes_adjusted_for_head] = 1.0
if KEEP_EPISODIC_MEMORY_FULL:
mem_sample_mask = np.random.choice(episodic_mem_size, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
else:
if episodic_filled_counter <= EPS_MEM_BATCH_SIZE:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
# Store the reference gradient
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask], model.class_attr: prev_class_attrs,
model.keep_prob: 1.0, model.train_phase: True}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
# Compute the gradient for current task and project if need be
a_gem_logit_mask[:] = 0
logit_mask_offset = task * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in task_labels[task]]
a_gem_logit_mask[task][classes_adjusted_for_head] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
if (iters % 100 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
break_training = 1
break
print('\t\t\t\tTraining for Task%d done!'%(task))
if model.imp_method == 'A-GEM':
# Update the previous task labels
prev_task_labels.extend(classes_adjusted_for_head)
prev_class_attrs[classes_adjusted_for_head] = class_attr[task_labels[task]]
if break_training:
break
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if task < (len(task_labels) - 1):
# TODO: For MAS, should the gradients be for current task or all the previous tasks
model.task_updates(sess, task, task_train_images, task_labels[task], num_classes_per_task=num_classes_per_task, class_attr=class_attr, online_cross_val=online_cross_val)
print('\t\t\t\tTask updates after Task%d done!'%(task))
# If importance method is '*-GEM' then store the episodic memory for the task
if 'GEM' in model.imp_method:
data_to_sample_from = {
'images': task_train_images,
'labels': task_train_labels,
}
if model.imp_method == 'S-GEM':
# Get the important samples from the current task
if is_herding: # Sampling based on MoF
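# Herding / mean-of-features (iCaRL-style) selection: compute feature embeddings for
# the task's training images, then sample_from_dataset_icarl is assumed to greedily
# pick, per class, the examples whose running feature mean best approximates the
# class mean in feature space.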
# Compute the features of training data
features_dim = model.image_feature_dim
features = np.zeros([num_train_examples, features_dim])
samples_at_a_time = 32
residual = num_train_examples % samples_at_a_time
for i in range(num_train_examples// samples_at_a_time):
offset = i * samples_at_a_time
features[offset:offset+samples_at_a_time] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+samples_at_a_time],
model.y_: task_train_labels[offset:offset+samples_at_a_time], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if residual > 0:
offset = (i + 1) * samples_at_a_time
features[offset:offset+residual] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+residual],
model.y_: task_train_labels[offset:offset+residual], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
imp_images, imp_labels = sample_from_dataset_icarl(data_to_sample_from, features, task_labels[task], SAMPLES_PER_CLASS)
else: # Random sampling
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
task_memory = {
'images': deepcopy(imp_images),
'labels': deepcopy(imp_labels),
}
task_based_memory.append(task_memory)
elif model.imp_method == 'A-GEM':
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
if KEEP_EPISODIC_MEMORY_FULL:
update_episodic_memory(data_to_sample_from, importance_array, episodic_mem_size, task, episodic_images, episodic_labels)
else:
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
if not KEEP_EPISODIC_MEMORY_FULL: # Fill the memory to always keep M/T samples per task
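# With this strategy the episodic memory is partitioned into equal per-task blocks:
# the current task writes its samples starting at eps_offset = task * total_imp_samples,
# and the labels go into that task's TOTAL_CLASSES-wide slice of the joint label
# space (head_offset), keeping roughly M/T samples per task.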
total_imp_samples = imp_images.shape[0]
eps_offset = task * total_imp_samples
episodic_images[eps_offset:eps_offset+total_imp_samples] = imp_images
episodic_labels[eps_offset:eps_offset+total_imp_samples, head_offset:head_offset+TOTAL_CLASSES] = imp_labels
episodic_filled_counter += total_imp_samples
print('Unique labels in the episodic memory: {}'.format(np.unique(np.nonzero(episodic_labels)[1])))
# Inspect episodic memory
if DEBUG_EPISODIC_MEMORY:
# Which labels are present in the memory
unique_labels = np.unique(np.nonzero(episodic_labels)[-1])
print('Unique labels present in the episodic memory: {}'.format(unique_labels))
print('Labels count:')
for lbl in unique_labels:
print('Label {}: {} samples'.format(lbl, np.where(np.nonzero(episodic_labels)[-1] == lbl)[0].size))
# Is there any space which is not filled
print('Empty space: {}'.format(np.where(np.sum(episodic_labels, axis=1) == 0)))
print('Episodic memory of {} images at task {} saved!'.format(episodic_images.shape[0], task))
# If sampling flag is set, store few of the samples from previous task
if do_sampling:
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones([datasets[task]['train']['images'].shape[0]], dtype=np.float32)
# Get the important samples from the current task
imp_images, imp_labels = sample_from_dataset(datasets[task]['train'], importance_array,
task_labels[task], SAMPLES_PER_CLASS)
if imp_images is not None:
if last_task_x is None:
last_task_x = imp_images
last_task_y_ = imp_labels
else:
last_task_x = np.concatenate((last_task_x, imp_images), axis=0)
last_task_y_ = np.concatenate((last_task_y_, imp_labels), axis=0)
# Delete the importance array now that you don't need it in the current run
del importance_array
print('\t\t\t\tEpisodic memory is saved for Task%d!'%(task))
if cross_validate_mode:
if (task == model.num_tasks - 1) or MULTI_TASK:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], class_attr, num_classes_per_task, task_labels, task, online_cross_val)
elif train_single_epoch:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], class_attr, num_classes_per_task, task_labels, task, False)
ftask[batch_dim_count] = fbatch
print('Task: {}, {}'.format(task, fbatch))
else:
# Multi-epoch training, so compute accuracy at the end
ftask = test_task_sequence(model, sess, datasets[0]['test'], class_attr, num_classes_per_task, task_labels, task, online_cross_val)
if SAVE_MODEL_PARAMS:
save(saver, sess, SNAPSHOT_DIR, iters)
if not cross_validate_mode:
# Store the accuracies computed at task T in a list
evals.append(np.array(ftask))
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
if not cross_validate_mode:
runs.append(np.array(evals))
if break_training:
break
# End for loop runid
if cross_validate_mode:
return np.mean(ftask)
else:
runs = np.array(runs)
return runs, task_labels_dataset
def test_task_sequence(model, sess, test_data, class_attr, num_classes_per_task, all_task_labels, task, online_cross_val):
"""
Snapshot the current performance
"""
final_acc = np.zeros(model.num_tasks)
test_set = 'test'
if model.imp_method == 'A-GEM':
logit_mask = np.zeros([model.num_tasks, model.total_classes])
else:
logit_mask = np.zeros(model.total_classes)
for tt, labels in enumerate(all_task_labels):
if tt > task:
return final_acc
samples_at_a_time = 10
task_images, task_labels = load_task_specific_data(test_data, labels)
global_class_indices = np.column_stack(np.nonzero(task_labels))
logit_mask_offset = tt * TOTAL_CLASSES
classes_adjusted_for_head = [cls + logit_mask_offset for cls in labels]
logit_mask[:] = 0
if model.imp_method == 'A-GEM':
logit_mask[tt][classes_adjusted_for_head] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
else:
logit_mask[classes_adjusted_for_head] = 1.0
#masked_class_attrs = np.zeros_like(class_attr)
#masked_class_attrs[labels] = class_attr[labels]
masked_class_attrs = np.zeros([model.total_classes, class_attr.shape[1]])
masked_class_attrs[classes_adjusted_for_head] = class_attr[labels]
final_train_labels = np.zeros([samples_at_a_time, model.total_classes])
head_offset = tt * TOTAL_CLASSES
acc = np.zeros(len(labels))
for cli, cls in enumerate(labels):
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
task_test_images = task_images[class_indices]
task_test_labels = task_labels[class_indices]
total_test_samples = task_test_images.shape[0]
total_corrects = 0
if total_test_samples < samples_at_a_time:
i = -1
for i in range(total_test_samples // samples_at_a_time):  # Integer division so range() receives an int
offset = i*samples_at_a_time
final_train_labels[:, head_offset:head_offset+TOTAL_CLASSES] = task_test_labels[offset:offset+samples_at_a_time]
feed_dict = {model.x: task_test_images[offset:offset+samples_at_a_time],
model.y_: final_train_labels,
model.class_attr: masked_class_attrs,
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
corrects = sess.run(model.correct_predictions[tt], feed_dict=feed_dict)
else:
feed_dict[model.output_mask] = logit_mask
corrects = sess.run(model.correct_predictions, feed_dict=feed_dict)
total_corrects += np.sum(corrects)
# Compute the corrects on residuals
offset = (i+1)*samples_at_a_time
num_residuals = total_test_samples % samples_at_a_time
final_train_labels[:num_residuals, head_offset:head_offset+TOTAL_CLASSES] = task_test_labels[offset:offset+num_residuals]
feed_dict = {model.x: task_test_images[offset:offset+num_residuals],
model.y_: final_train_labels[:num_residuals],
model.class_attr: masked_class_attrs,
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
corrects = sess.run(model.correct_predictions[tt], feed_dict=feed_dict)
else:
feed_dict[model.output_mask] = logit_mask
corrects = sess.run(model.correct_predictions, feed_dict=feed_dict)
total_corrects += np.sum(corrects)
if total_test_samples != 0:
# Mean accuracy on the task
acc[cli] = total_corrects/ float(total_test_samples)
final_acc[tt] = np.mean(acc)
return final_acc
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Initialize the random seed of numpy
np.random.seed(args.random_seed)
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS - K_FOR_CROSS_VAL
# Load the split AWA dataset
data_labs = [np.arange(TOTAL_CLASSES)]
datasets, AWA_attr = construct_split_awa(data_labs, args.data_dir, AWA_TRAIN_LIST, AWA_VAL_LIST, AWA_TEST_LIST, IMG_HEIGHT, IMG_WIDTH, attr_file=AWA_ATTR_LIST)
if args.online_cross_val:
AWA_attr[K_FOR_CROSS_VAL*CLASSES_PER_TASK:] = 0
else:
AWA_attr[:K_FOR_CROSS_VAL*CLASSES_PER_TASK] = 0
print('Attributes: {}'.format(np.sum(AWA_attr, axis=1)))
if args.cross_validate_mode:
models_list = MODELS
learning_rate_list = [0.1, 0.03, 0.01, 0.001, 0.0003]
else:
models_list = [args.imp_method]
for imp_method in models_list:
if imp_method == 'VAN':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.003]
elif imp_method == 'PI':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10]
else:
synap_stgth_list = [10]
learning_rate_list = [0.003]
elif imp_method == 'EWC' or imp_method == 'M-EWC':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [100]
learning_rate_list = [0.003]
elif imp_method == 'MAS':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [0.1]
learning_rate_list = [0.001]
elif imp_method == 'RWALK':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [10] # Check again!
learning_rate_list = [0.003]
elif imp_method == 'S-GEM':
synap_stgth_list = [0]
if args.online_cross_val:
pass
else:
learning_rate_list = [args.learning_rate]
elif imp_method == 'A-GEM':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.003]
for synap_stgth in synap_stgth_list:
for lr in learning_rate_list:
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'ARCH': args.arch,
'DATASET': 'SPLIT_AWA',
'HYBRID': args.set_hybrid,
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': imp_method,
'SYNAP_STGTH': synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': lr,
'BATCH_SIZE': args.batch_size,
'EPS_MEMORY': args.do_sampling,
'MEM_SIZE': args.mem_size,
'IS_HERDING': args.is_herding}
experiment_id = "SPLIT_AWA_HERDING_%r_HYB_%r_%s_%r_%s_%s_%s_%r_%s-"%(args.is_herding, args.set_hybrid, args.arch, args.train_single_epoch, imp_method,
str(synap_stgth).replace('.', '_'),
str(args.batch_size), args.do_sampling, str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(args.random_seed)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
y_ = tf.placeholder(tf.float32, shape=[None, num_tasks*TOTAL_CLASSES])
attr = tf.placeholder(tf.float32, shape=[num_tasks*TOTAL_CLASSES, ATTR_DIMS])
if not args.train_single_epoch:
# Define ops for data augmentation
x_aug = image_scaling(x)
x_aug = random_crop_and_pad_image(x_aug, IMG_HEIGHT, IMG_WIDTH)
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=lr)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=lr)
elif args.optim == 'MOMENTUM':
base_lr = tf.constant(lr)
learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - train_step / training_iters), OPT_POWER))
opt = tf.train.MomentumOptimizer(lr, OPT_MOMENTUM)
# Create the Model / construct the graph
if args.train_single_epoch:
# When training using a single epoch then there is no need for data augmentation
model = Model(x, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True, attr=attr)
else:
model = Model(x_aug, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True, x_test=x, attr=attr)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=100)
runs, task_labels_dataset = train_task_sequence(model, sess, saver, datasets, AWA_attr, CLASSES_PER_TASK, args.cross_validate_mode,
args.train_single_epoch, args.do_sampling, args.is_herding, args.mem_size*CLASSES_PER_TASK*num_tasks, args.train_iters,
args.batch_size, args.num_runs, args.init_checkpoint, args.online_cross_val, args.random_seed)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
print('Time spent: {}'.format(time_spent))
# Clean up
del model
if args.cross_validate_mode:
# If cross-validation flag is enabled, store the stuff in a text file
cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_AWA_HYBRID_%s_%s'%(imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
f.write('HERDING: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(args.is_herding, args.arch, lr, synap_stgth, runs))
else:
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
exper_labels = dict(labels=task_labels_dataset)
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
if __name__ == '__main__':
main()
|
agem-main
|
conv_split_awa_hybrid.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split CUB experiment with zero shot transfer.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import image_scaling, random_crop_and_pad_image, random_horizontal_flip, construct_split_cub
from utils.utils import get_sample_weights, sample_from_dataset, concatenate_datasets, update_episodic_memory_with_less_data, samples_for_each_class, sample_from_dataset_icarl, load_task_specific_data
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'VGG', 'RESNET-B']
ARCH = 'RESNET-B'
PRETRAIN = True
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'A-GEM'] #List of valid models
IMP_METHOD = 'EWC'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 5 # Number of samples per class
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
TOTAL_CLASSES = 200 # Total number of classes in the dataset
EPS_MEM_BATCH_SIZE = 128
DEBUG_EPISODIC_MEMORY = False
KEEP_EPISODIC_MEMORY_FULL = False
K_FOR_CROSS_VAL = 3
## Logging, saving and testing options
LOG_DIR = './split_cub_results'
SNAPSHOT_DIR = './cub_snapshots'
SAVE_MODEL_PARAMS = False
## Evaluation options
## Task split
NUM_TASKS = 20
MULTI_TASK = False
## Dataset specific options
ATTR_DIMS = 312
DATA_DIR='CUB_data/CUB_200_2011/images'
#CUB_TRAIN_LIST = 'dataset_lists/tmp_list.txt'
#CUB_TEST_LIST = 'dataset_lists/tmp_list.txt'
CUB_TRAIN_LIST = 'dataset_lists/CUB_train_list.txt'
CUB_TEST_LIST = 'dataset_lists/CUB_test_list.txt'
CUB_ATTR_LIST = 'dataset_lists/CUB_attr_in_order.pickle'
RESNET18_IMAGENET_CHECKPOINT = './resnet-18-pretrained-imagenet/model.ckpt'
# Define functions to load/store training weights. We will use ImageNet initialization later on.
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Script for split CUB hybrid experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--set-hybrid", action="store_true",
help="If option is chosen then train using hybrid model")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--do-sampling", action="store_true",
help="Whether to do sampling")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Number of samples per class from previous tasks.")
parser.add_argument("--is-herding", action="store_true",
help="Herding based sampling")
parser.add_argument("--data-dir", type=str, default=DATA_DIR,
help="Directory from where the CUB data will be read.\
NOTE: Provide path till <CUB_DIR>/images")
parser.add_argument("--init-checkpoint", type=str, default=RESNET18_IMAGENET_CHECKPOINT,
help="TF checkpoint file containing initialization for ImageNet.\
NOTE: NPZ file for VGG and TF Checkpoint for ResNet")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
def train_task_sequence(model, sess, saver, datasets, class_attr, classes_per_task, cross_validate_mode, train_single_epoch, do_sampling, is_herding,
mem_per_class, train_iters, batch_size, num_runs, init_checkpoint, online_cross_val, random_seed):
"""
Train and evaluate LLL system such that we only see an example once
Args:
Returns:
dict A dictionary containing mean and stds for the experiment
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
break_training = 0
# Loop over number of runs to average over
for runid in range(num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
total_classes = classes_per_task * model.num_tasks
if online_cross_val:
label_array = np.arange(total_classes)
else:
class_label_offset = K_FOR_CROSS_VAL * classes_per_task
label_array = np.arange(class_label_offset, total_classes+class_label_offset)
np.random.shuffle(label_array)
for tt in range(model.num_tasks):
tt_offset = tt*classes_per_task
task_labels.append(list(label_array[tt_offset:tt_offset+classes_per_task]))
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
# Store the task labels
task_labels_dataset.append(task_labels)
# Set episodic memory size
episodic_mem_size = mem_per_class * total_classes
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
if PRETRAIN:
# Load the variables from a checkpoint
if model.network_arch == 'RESNET-B':
# Define loader (weights which will be loaded from a checkpoint)
restore_vars = [v for v in model.trainable_vars if 'fc' not in v.name and 'attr_embed' not in v.name]
loader = tf.train.Saver(restore_vars)
load(loader, sess, init_checkpoint)
elif model.network_arch == 'VGG':
# Load the pretrained weights from the npz file
weights = np.load(init_checkpoint)
keys = sorted(weights.keys())
for i, key in enumerate(keys[:-2]): # Load everything except the last layer
sess.run(model.trainable_vars[i].assign(weights[key]))
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = []
if model.imp_method == 'S-GEM':
# List to store the episodic memories of the previous tasks
task_based_memory = []
if model.imp_method == 'A-GEM':
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
episodic_filled_counter = 0
a_gem_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
# Labels for all the tasks that we have seen in the past
prev_task_labels = []
prev_class_attrs = np.zeros_like(class_attr)
if do_sampling:
# List to store important samples from the previous tasks
last_task_x = None
last_task_y_ = None
# Mask for the softmax
logit_mask = np.zeros(TOTAL_CLASSES)
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0):
model.restore(sess)
# If sampling flag is set append the previous datasets
if do_sampling:
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
if task > 0:
task_train_images, task_train_labels = concatenate_datasets(task_tr_images, task_tr_labels, last_task_x, last_task_y_)
else:
task_train_images = task_tr_images
task_train_labels = task_tr_labels
else:
# Extract training images and labels for the current task
task_train_images, task_train_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
# If multi_task is set then train using all the datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(task_labels)):
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[t_])
task_train_images = np.concatenate((task_train_images, task_tr_images), axis=0)
task_train_labels = np.concatenate((task_train_labels, task_tr_labels), axis=0)
else:
# Skip training for this task
continue
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
# Test for the tasks that we've seen so far
test_labels.extend(task_labels[task])
# Declare variables to store sample importance if sampling flag is set
if do_sampling:
# Get the sample weighting
task_sample_weights = get_sample_weights(task_train_labels, test_labels)
else:
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
# Train a task observing sequence of data
logit_mask[:] = 0
if train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
if cross_validate_mode:
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
else:
num_iters = train_iters
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
# Randomly shuffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
ftask = []
if MULTI_TASK:
logit_mask[:] = 1.0
masked_class_attrs = class_attr
else:
# Attribute mask
masked_class_attrs = np.zeros_like(class_attr)
if do_sampling:
masked_class_attrs[test_labels] = class_attr[test_labels]
else:
masked_class_attrs[task_labels[task]] = class_attr[task_labels[task]]
# Training loop for task T
for iters in range(num_iters):
if train_single_epoch and not cross_validate_mode and not MULTI_TASK:
#if (iters <= 50 and iters % 5 == 0) or (iters > 50 and iters % 50 == 0):
if (iters < 10) or (iters % 5 == 0):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets[0]['test'], class_attr, classes_per_task, task_labels, task)
ftask.append(fbatch)
# Set the output labels over which the model needs to be trained
if model.imp_method == 'A-GEM':
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
else:
logit_mask[:] = 0
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
if train_single_epoch:
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.class_attr: masked_class_attrs,
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
else:
offset = (iters * batch_size) % (num_train_examples - batch_size)
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.class_attr: masked_class_attrs,
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'S-GEM':
if task == 0:
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
# Randomly sample a task from the previous tasks
prev_task = np.random.randint(0, task)
# Set the logit mask for the randomly sampled task
logit_mask[:] = 0
logit_mask[task_labels[prev_task]] = 1.0
prev_class_attrs = np.zeros_like(class_attr)
prev_class_attrs[task_labels[prev_task]] = class_attr[task_labels[prev_task]]
# Store the reference gradient
sess.run(model.store_ref_grads, feed_dict={model.x: task_based_memory[prev_task]['images'], model.y_: task_based_memory[prev_task]['labels'],
model.class_attr: prev_class_attrs,
model.keep_prob: 1.0, model.output_mask: logit_mask, model.train_phase: True})
# Compute the gradient for current task and project if need be
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the mask for all the previous tasks so far
a_gem_logit_mask[:] = 0
for tt in range(task):
a_gem_logit_mask[tt][task_labels[tt]] = 1.0
if KEEP_EPISODIC_MEMORY_FULL:
mem_sample_mask = np.random.choice(episodic_mem_size, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
else:
if episodic_filled_counter <= EPS_MEM_BATCH_SIZE:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.class_attr: prev_class_attrs, model.keep_prob: 1.0, model.train_phase: True}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
# Compute the gradient for current task and project if need be
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
if (iters % 50 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
break_training = 1
break
print('\t\t\t\tTraining for Task%d done!'%(task))
if model.imp_method == 'A-GEM':
# Update the previous task labels and attributes
prev_task_labels += task_labels[task]
prev_class_attrs[prev_task_labels] = class_attr[prev_task_labels]
if break_training:
break
# Compute the inter-task updates, Fisher/importance scores, etc.
# Don't calculate the task updates for the last task
if task < (len(task_labels) - 1):
# TODO: For MAS, should the gradients be for current task or all the previous tasks
model.task_updates(sess, task, task_train_images, task_labels[task], num_classes_per_task=classes_per_task, class_attr=class_attr, online_cross_val=online_cross_val)
print('\t\t\t\tTask updates after Task%d done!'%(task))
# If importance method is '*-GEM' then store the episodic memory for the task
if 'GEM' in model.imp_method:
data_to_sample_from = {
'images': task_train_images,
'labels': task_train_labels,
}
if model.imp_method == 'S-GEM':
# Get the important samples from the current task
if is_herding: # Sampling based on MoF
# Compute the features of training data
features_dim = model.image_feature_dim
features = np.zeros([num_train_examples, features_dim])
samples_at_a_time = 32
residual = num_train_examples % samples_at_a_time
for i in range(num_train_examples// samples_at_a_time):
offset = i * samples_at_a_time
features[offset:offset+samples_at_a_time] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+samples_at_a_time],
model.y_: task_train_labels[offset:offset+samples_at_a_time], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if residual > 0:
offset = (i + 1) * samples_at_a_time
features[offset:offset+residual] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+residual],
model.y_: task_train_labels[offset:offset+residual], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
imp_images, imp_labels = sample_from_dataset_icarl(data_to_sample_from, features, task_labels[task], SAMPLES_PER_CLASS)
else: # Random sampling
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
task_memory = {
'images': deepcopy(imp_images),
'labels': deepcopy(imp_labels),
}
task_based_memory.append(task_memory)
elif model.imp_method == 'A-GEM':
if is_herding: # Sampling based on MoF
# Compute the features of training data
features_dim = model.image_feature_dim
features = np.zeros([num_train_examples, features_dim])
samples_at_a_time = 32
residual = num_train_examples % samples_at_a_time
for i in range(num_train_examples// samples_at_a_time):
offset = i * samples_at_a_time
features[offset:offset+samples_at_a_time] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+samples_at_a_time],
model.y_: task_train_labels[offset:offset+samples_at_a_time], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if residual > 0:
offset = (i + 1) * samples_at_a_time
features[offset:offset+residual] = sess.run(model.features, feed_dict={model.x: task_train_images[offset:offset+residual],
model.y_: task_train_labels[offset:offset+residual], model.keep_prob: 1.0,
model.output_mask: logit_mask, model.train_phase: False})
if KEEP_EPISODIC_MEMORY_FULL:
update_episodic_memory(data_to_sample_from, features, episodic_mem_size, task, episodic_images, episodic_labels, task_labels=task_labels[task], is_herding=True)
else:
imp_images, imp_labels = sample_from_dataset_icarl(data_to_sample_from, features, task_labels[task], SAMPLES_PER_CLASS)
else: # Random sampling
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones(num_train_examples, dtype=np.float32)
if KEEP_EPISODIC_MEMORY_FULL:
update_episodic_memory(data_to_sample_from, importance_array, episodic_mem_size, task, episodic_images, episodic_labels)
else:
imp_images, imp_labels = sample_from_dataset(data_to_sample_from, importance_array, task_labels[task], SAMPLES_PER_CLASS)
if not KEEP_EPISODIC_MEMORY_FULL: # Fill the memory to always keep M/T samples per task
total_imp_samples = imp_images.shape[0]
eps_offset = task * total_imp_samples
episodic_images[eps_offset:eps_offset+total_imp_samples] = imp_images
episodic_labels[eps_offset:eps_offset+total_imp_samples] = imp_labels
episodic_filled_counter += total_imp_samples
# Inspect episodic memory
if DEBUG_EPISODIC_MEMORY:
# Which labels are present in the memory
unique_labels = np.unique(np.nonzero(episodic_labels)[-1])
print('Unique labels present in the episodic memory: {}'.format(unique_labels))
print('Labels count:')
for lbl in unique_labels:
print('Label {}: {} samples'.format(lbl, np.where(np.nonzero(episodic_labels)[-1] == lbl)[0].size))
# Is there any space which is not filled
print('Empty space: {}'.format(np.where(np.sum(episodic_labels, axis=1) == 0)))
print('Episodic memory of {} images at task {} saved!'.format(episodic_images.shape[0], task))
# If sampling flag is set, store few of the samples from previous task
if do_sampling:
# Do the uniform sampling/ only get examples from current task
importance_array = np.ones([task_train_images.shape[0]], dtype=np.float32)
# Get the important samples from the current task
task_data = {
'images': task_tr_images,
'labels': task_tr_labels,
}
imp_images, imp_labels = sample_from_dataset(task_data, importance_array, task_labels[task], SAMPLES_PER_CLASS)
if imp_images is not None:
if last_task_x is None:
last_task_x = imp_images
last_task_y_ = imp_labels
else:
last_task_x = np.concatenate((last_task_x, imp_images), axis=0)
last_task_y_ = np.concatenate((last_task_y_, imp_labels), axis=0)
# Delete the importance array now that you don't need it in the current run
del importance_array
print('\t\t\t\tEpisodic memory is saved for Task%d!'%(task))
if cross_validate_mode:
if (task == model.num_tasks - 1) or MULTI_TASK:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], class_attr, classes_per_task, task_labels, task)
elif train_single_epoch:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], class_attr, classes_per_task, task_labels, task)
print('Task: {} Acc: {}'.format(task, fbatch))
ftask.append(fbatch)
else:
# Multi-epoch training, so compute accuracy at the end
ftask = test_task_sequence(model, sess, datasets[0]['test'], class_attr, classes_per_task, task_labels, task)
if SAVE_MODEL_PARAMS:
save(saver, sess, SNAPSHOT_DIR, iters)
if not cross_validate_mode:
# Store the accuracies computed at task T in a list
evals.append(np.array(ftask))
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
if not cross_validate_mode:
runs.append(np.array(evals))
if break_training:
break
# End for loop runid
if cross_validate_mode:
return np.mean(ftask)
else:
runs = np.array(runs)
return runs, task_labels_dataset
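# --- Illustrative sketch (not part of the original file) -------------------
# The episodic-memory bookkeeping above fills a fixed buffer with
# SAMPLES_PER_CLASS examples per class: eps_offset = task * samples_per_task
# and episodic_filled_counter grows by the same amount after every task.
# The helper below reproduces just that arithmetic in plain Python; the
# constants in the example (3 tasks, 5 classes per task, 2 samples per class)
# are made-up values, not settings used by this repository.
def _sketch_episodic_memory_offsets(num_tasks=3, classes_per_task=5, samples_per_class=2):
    """Return (offset, filled_counter) pairs mirroring the buffer-filling loop."""
    samples_per_task = classes_per_task * samples_per_class
    episodic_mem_size = samples_per_task * num_tasks
    filled = 0
    layout = []
    for task in range(num_tasks):
        eps_offset = task * samples_per_task   # where this task's samples land in the buffer
        filled += samples_per_task             # mirrors the episodic_filled_counter update
        layout.append((eps_offset, filled))
    assert filled == episodic_mem_size
    return layout
# Example: _sketch_episodic_memory_offsets() -> [(0, 10), (10, 20), (20, 30)]
# ---------------------------------------------------------------------------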
def test_task_sequence(model, sess, test_data, class_attr, num_classes_per_task, test_tasks, task):
"""
Snapshot the current performance
"""
final_acc = np.zeros(model.num_tasks)
if model.imp_method == 'A-GEM':
logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
else:
logit_mask = np.zeros(TOTAL_CLASSES)
for tt, labels in enumerate(test_tasks):
if not MULTI_TASK:
if tt > task:
return final_acc
masked_class_attrs = np.zeros_like(class_attr)
masked_class_attrs[labels] = class_attr[labels]
task_test_images, task_test_labels = load_task_specific_data(test_data, labels)
total_test_samples = task_test_images.shape[0]
samples_at_a_time = 10
total_corrects = 0
logit_mask[:] = 0
if model.imp_method == 'A-GEM':
logit_mask[tt][labels] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
else:
logit_mask[labels] = 1.0
for i in range(total_test_samples // samples_at_a_time):
offset = i*samples_at_a_time
feed_dict = {model.x: task_test_images[offset:offset+samples_at_a_time],
model.y_: task_test_labels[offset:offset+samples_at_a_time],
model.class_attr: masked_class_attrs,
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
total_corrects += np.sum(sess.run(model.correct_predictions[tt], feed_dict=feed_dict))
else:
feed_dict[model.output_mask] = logit_mask
total_corrects += np.sum(sess.run(model.correct_predictions, feed_dict=feed_dict))
# Compute the corrects on residuals
offset = (i+1)*samples_at_a_time
num_residuals = total_test_samples % samples_at_a_time
feed_dict = {model.x: task_test_images[offset:offset+num_residuals],
model.y_: task_test_labels[offset:offset+num_residuals],
model.class_attr: masked_class_attrs,
model.keep_prob: 1.0, model.train_phase: False}
if model.imp_method == 'A-GEM':
feed_dict.update(logit_mask_dict)
total_corrects += np.sum(sess.run(model.correct_predictions[tt], feed_dict=feed_dict))
else:
feed_dict[model.output_mask] = logit_mask
total_corrects += np.sum(sess.run(model.correct_predictions, feed_dict=feed_dict))
# Mean accuracy on the task
acc = total_corrects/ float(total_test_samples)
final_acc[tt] = acc
return final_acc
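# --- Illustrative sketch (not part of the original file) -------------------
# test_task_sequence() evaluates each task with a logit mask that zeroes out
# the classes not belonging to that task: a single flat mask for most
# importance methods, and one row per task for A-GEM. The NumPy-only helper
# below builds both variants; the label split in the example is invented.
import numpy as np
def _sketch_logit_masks(task_labels, total_classes):
    """Return (flat_masks, per_task_mask_matrix) for the given label split."""
    per_task_matrix = np.zeros([len(task_labels), total_classes])
    flat_masks = []
    for tt, labels in enumerate(task_labels):
        flat = np.zeros(total_classes)
        flat[labels] = 1.0                  # mask used by VAN/EWC/PI/MAS/RWALK/S-GEM
        per_task_matrix[tt][labels] = 1.0   # row tt used by A-GEM
        flat_masks.append(flat)
    return flat_masks, per_task_matrix
# Example: _sketch_logit_masks([[0, 1], [2, 3]], total_classes=6)
# ---------------------------------------------------------------------------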
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
# Get the task labels from the total number of tasks and full label space
classes_per_task = TOTAL_CLASSES// NUM_TASKS
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = NUM_TASKS - K_FOR_CROSS_VAL
# Load the split CUB dataset
data_labs = [np.arange(TOTAL_CLASSES)]
datasets, CUB_attr = construct_split_cub(data_labs, args.data_dir, CUB_TRAIN_LIST, CUB_TEST_LIST, IMG_HEIGHT, IMG_WIDTH, attr_file=CUB_ATTR_LIST)
if args.online_cross_val:
CUB_attr[K_FOR_CROSS_VAL*classes_per_task:] = 0
else:
CUB_attr[:K_FOR_CROSS_VAL*classes_per_task] = 0
if args.cross_validate_mode:
models_list = MODELS
learning_rate_list = [0.3, 0.1, 0.01, 0.003, 0.001]
else:
models_list = [args.imp_method]
for imp_method in models_list:
if imp_method == 'VAN':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.03]
elif imp_method == 'PI':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10]
else:
synap_stgth_list = [0.1]
learning_rate_list = [0.03]
elif imp_method == 'EWC' or imp_method == 'M-EWC':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [10]
learning_rate_list = [0.03]
elif imp_method == 'MAS':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [0.1]
learning_rate_list = [0.03]
elif imp_method == 'RWALK':
if args.online_cross_val or args.cross_validate_mode:
synap_stgth_list = [0.1, 1, 10, 100]
else:
synap_stgth_list = [1]
learning_rate_list = [0.03]
elif imp_method == 'S-GEM':
synap_stgth_list = [0]
if args.online_cross_val:
pass
else:
learning_rate_list = [args.learning_rate]
elif imp_method == 'A-GEM':
synap_stgth_list = [0]
if args.online_cross_val or args.cross_validate_mode:
pass
else:
learning_rate_list = [0.03]
for synap_stgth in synap_stgth_list:
for lr in learning_rate_list:
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'ARCH': args.arch,
'DATASET': 'SPLIT_CUB',
'HYBRID': args.set_hybrid,
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': imp_method,
'SYNAP_STGTH': synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': lr,
'BATCH_SIZE': args.batch_size,
'EPS_MEMORY': args.do_sampling,
'MEM_SIZE': args.mem_size,
'IS_HERDING': args.is_herding}
experiment_id = "SPLIT_CUB_HERDING_%r_HYB_%r_%s_%r_%s_%s_%s_%r_%s-"%(args.is_herding, args.set_hybrid, args.arch, args.train_single_epoch, imp_method,
str(synap_stgth).replace('.', '_'),
str(args.batch_size), args.do_sampling, str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(RANDOM_SEED)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])
attr = tf.placeholder(tf.float32, shape=[TOTAL_CLASSES, ATTR_DIMS])
if not args.train_single_epoch:
# Define ops for data augmentation
x_aug = image_scaling(x)
x_aug = random_crop_and_pad_image(x_aug, IMG_HEIGHT, IMG_WIDTH)
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=lr)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=lr)
elif args.optim == 'MOMENTUM':
base_lr = tf.constant(lr)
learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - train_step / training_iters), OPT_POWER))
opt = tf.train.MomentumOptimizer(lr, OPT_MOMENTUM)
# Create the model/construct the graph
if args.train_single_epoch:
# When training using a single epoch then there is no need for data augmentation
model = Model(x, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True, attr=attr)
else:
model = Model(x_aug, y_, num_tasks, opt, imp_method, synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, network_arch=args.arch, is_ATT_DATASET=True, x_test=x, attr=attr)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=100)
runs, task_labels_dataset = train_task_sequence(model, sess, saver, datasets, CUB_attr, classes_per_task, args.cross_validate_mode,
args.train_single_epoch, args.do_sampling, args.is_herding, args.mem_size, args.train_iters,
args.batch_size, args.num_runs, args.init_checkpoint, args.online_cross_val, args.random_seed)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
print('Time spent: {}'.format(time_spent))
# Clean up
del model
if args.cross_validate_mode:
# If cross-validation flag is enabled, store the stuff in a text file
cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_CUB_%s_%s'%(imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
f.write('HERDING: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(args.is_herding, args.arch, lr, synap_stgth, runs))
else:
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
exper_labels = dict(labels=task_labels_dataset)
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
if __name__ == '__main__':
main()
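# --- Illustrative sketch (not part of the original file) -------------------
# For the MOMENTUM optimizer branch above, the learning rate follows a
# polynomial decay: lr_t = base_lr * (1 - step / total_iters) ** OPT_POWER.
# The tiny function below evaluates the same schedule in plain Python so the
# shape of the decay is easy to inspect; power=0.9 and the other numbers are
# assumed example values, not necessarily the constants used by this repo.
def _sketch_poly_decay(base_lr=0.03, step=500, total_iters=1000, power=0.9):
    return base_lr * (1.0 - float(step) / total_iters) ** power
# Example: _sketch_poly_decay() ~= 0.03 * 0.5**0.9 ~= 0.0161
# ---------------------------------------------------------------------------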
|
agem-main
|
conv_split_cub_hybrid.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import tensorflow as tf
import numpy as np
def vgg_conv_layer(x, kernel_size, out_channels, stride, var_list, pad="SAME", name="conv"):
"""
Define API for conv operation. This includes kernel declaration and
conv operation both followed by relu.
"""
in_channels = x.get_shape().as_list()[-1]
with tf.variable_scope(name):
#n = kernel_size * kernel_size * out_channels
n = kernel_size * in_channels
stdv = 1.0 / math.sqrt(n)
w = tf.get_variable('kernel_weights', [kernel_size, kernel_size, in_channels, out_channels],
tf.float32,
initializer=tf.random_uniform_initializer(-stdv, stdv))
b = tf.get_variable('kernel_biases', [out_channels], tf.float32, initializer=tf.random_uniform_initializer(-stdv, stdv))
# Append the variable to the trainable variables list
var_list.append(w)
var_list.append(b)
# Do the convolution operation
bias = tf.nn.bias_add(tf.nn.conv2d(x, w, [1, stride, stride, 1], padding=pad), b)
relu = tf.nn.relu(bias)
return relu
def vgg_fc_layer(x, out_dim, var_list, apply_relu=True, name="fc"):
"""
Define API for the fully connected layer. This includes both the variable
declaration and matmul operation.
"""
in_dim = x.get_shape().as_list()[1]
stdv = 1.0 / math.sqrt(in_dim)
with tf.variable_scope(name):
# Define the weights and biases for this layer
w = tf.get_variable('weights', [in_dim, out_dim], tf.float32,
initializer=tf.random_uniform_initializer(-stdv, stdv))
b = tf.get_variable('biases', [out_dim], tf.float32, initializer=tf.random_uniform_initializer(-stdv, stdv))
# Append the variable to the trainable variables list
var_list.append(w)
var_list.append(b)
# Do the FC operation
output = tf.matmul(x, w) + b
# Apply relu if needed
if apply_relu:
output = tf.nn.relu(output)
return output
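# --- Illustrative sketch (not part of the original file) -------------------
# Both layers above draw their weights from U(-stdv, stdv) with
# stdv = 1 / sqrt(fan_in): for the conv kernel fan_in is taken as
# kernel_size * in_channels, for the FC layer it is the input dimension.
# The helper below reproduces that bound with NumPy only; the shapes in the
# example are made-up.
import math
import numpy as np
def _sketch_uniform_init(fan_in, shape, seed=0):
    """Sample a weight tensor the way vgg_conv_layer/vgg_fc_layer initialize theirs."""
    stdv = 1.0 / math.sqrt(fan_in)
    rng = np.random.RandomState(seed)
    return rng.uniform(-stdv, stdv, size=shape)
# Example: a 3x3 conv with 64 input channels -> bound 1/sqrt(3*64) ~= 0.072
# _sketch_uniform_init(3 * 64, (3, 3, 64, 128)).shape == (3, 3, 64, 128)
# ---------------------------------------------------------------------------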
|
agem-main
|
utils/vgg_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .data_utils import construct_permute_mnist, construct_split_mnist, construct_split_cifar, construct_split_cub, construct_split_imagenet
from .data_utils import image_scaling, random_crop_and_pad_image, random_horizontal_flip
from .utils import clone_variable_list, create_fc_layer, create_conv_layer, sample_from_dataset, update_episodic_memory, update_episodic_memory_with_less_data, concatenate_datasets
from .utils import samples_for_each_class, sample_from_dataset_icarl, get_sample_weights, compute_fgt, load_task_specific_data, load_task_specific_data_in_proportion
from .utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior
from .vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from .resnet_utils import _conv, _fc, _bn, _residual_block, _residual_block_first
from .vgg_utils import vgg_conv_layer, vgg_fc_layer
|
agem-main
|
utils/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import tensorflow as tf
import numpy as np
def _conv(x, kernel_size, out_channels, stride, var_list, pad="SAME", name="conv"):
"""
Define API for conv operation. This includes kernel declaration and
conv operation both.
"""
in_channels = x.get_shape().as_list()[-1]
with tf.variable_scope(name):
#n = kernel_size * kernel_size * out_channels
n = kernel_size * in_channels
stdv = 1.0 / math.sqrt(n)
w = tf.get_variable('kernel', [kernel_size, kernel_size, in_channels, out_channels],
tf.float32,
initializer=tf.random_uniform_initializer(-stdv, stdv))
#initializer=tf.random_normal_initializer(stddev=np.sqrt(2.0/n)))
# Append the variable to the trainable variables list
var_list.append(w)
# Do the convolution operation
output = tf.nn.conv2d(x, w, [1, stride, stride, 1], padding=pad)
return output
def _fc(x, out_dim, var_list, name="fc", is_cifar=False):
"""
Define API for the fully connected layer. This includes both the variable
declaration and matmul operation.
"""
in_dim = x.get_shape().as_list()[1]
stdv = 1.0 / math.sqrt(in_dim)
with tf.variable_scope(name):
# Define the weights and biases for this layer
w = tf.get_variable('weights', [in_dim, out_dim], tf.float32,
initializer=tf.random_uniform_initializer(-stdv, stdv))
#initializer=tf.truncated_normal_initializer(stddev=0.1))
if is_cifar:
b = tf.get_variable('biases', [out_dim], tf.float32, initializer=tf.random_uniform_initializer(-stdv, stdv))
else:
b = tf.get_variable('biases', [out_dim], tf.float32, initializer=tf.constant_initializer(0))
# Append the variable to the trainable variables list
var_list.append(w)
var_list.append(b)
# Do the FC operation
output = tf.matmul(x, w) + b
return output
def _bn(x, var_list, train_phase, name='bn_'):
"""
Batch normalization on convolutional maps.
Args:
x Input feature maps (N x H x W x C)
var_list List to which the beta/gamma variables are appended
train_phase Boolean tensor switching between batch and moving statistics
Return:
normed Batch-normalized feature maps
"""
n_out = x.get_shape().as_list()[3]
with tf.variable_scope(name):
beta = tf.get_variable('beta', shape=[n_out], dtype=tf.float32, initializer=tf.constant_initializer(0.0))
gamma = tf.get_variable('gamma', shape=[n_out], dtype=tf.float32, initializer=tf.constant_initializer(1.0))
var_list.append(beta)
var_list.append(gamma)
batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=0.9)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(train_phase,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
return normed
def _residual_block(x, trainable_vars, train_phase, apply_relu=True, name="unit"):
"""
ResNet block when the number of channels across the skip connections are the same
"""
in_channels = x.get_shape().as_list()[-1]
with tf.variable_scope(name) as scope:
shortcut = x
x = _conv(x, 3, in_channels, 1, trainable_vars, name='conv_1')
x = _bn(x, trainable_vars, train_phase, name="bn_1")
x = tf.nn.relu(x)
x = _conv(x, 3, in_channels, 1, trainable_vars, name='conv_2')
x = _bn(x, trainable_vars, train_phase, name="bn_2")
x = x + shortcut
if apply_relu == True:
x = tf.nn.relu(x)
return x
def _residual_block_first(x, out_channels, strides, trainable_vars, train_phase, apply_relu=True, name="unit", is_ATT_DATASET=False):
"""
A generic ResNet Block
"""
in_channels = x.get_shape().as_list()[-1]
with tf.variable_scope(name) as scope:
# Figure out the shortcut connection first
if in_channels == out_channels:
if strides == 1:
shortcut = tf.identity(x)
else:
shortcut = tf.nn.max_pool(x, [1, strides, strides, 1], [1, strides, strides, 1], 'VALID')
else:
shortcut = _conv(x, 1, out_channels, strides, trainable_vars, name="shortcut")
if not is_ATT_DATASET:
shortcut = _bn(shortcut, trainable_vars, train_phase, name="bn_0")
# Residual block
x = _conv(x, 3, out_channels, strides, trainable_vars, name="conv_1")
x = _bn(x, trainable_vars, train_phase, name="bn_1")
x = tf.nn.relu(x)
x = _conv(x, 3, out_channels, 1, trainable_vars, name="conv_2")
x = _bn(x, trainable_vars, train_phase, name="bn_2")
x = x + shortcut
if apply_relu:
x = tf.nn.relu(x)
return x
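# --- Illustrative sketch (not part of the original file) -------------------
# _residual_block_first() picks the shortcut branch as follows: identity when
# channels and stride are unchanged, max-pool when only the stride changes,
# and a 1x1 projection conv when the channel count changes. The pure-Python
# helper below just mirrors that decision; the strings it returns are
# illustrative labels, not symbols from this repository.
def _sketch_shortcut_choice(in_channels, out_channels, strides):
    if in_channels == out_channels:
        return "identity" if strides == 1 else "max_pool(stride=%d)" % strides
    return "1x1 conv projection (stride=%d)" % strides
# Example: _sketch_shortcut_choice(64, 128, 2) -> '1x1 conv projection (stride=2)'
# ---------------------------------------------------------------------------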
|
agem-main
|
utils/resnet_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Define utility functions for manipulating datasets
"""
import os
import numpy as np
import sys
from copy import deepcopy
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
import tarfile
import zipfile
import random
import cv2
#IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
IMG_MEAN = np.array((103.94,116.78,123.68), dtype=np.float32)
############################################################
### Data augmentation utils ################################
############################################################
def image_scaling(images):
"""
Randomly scales the images between 0.5 and 1.5 times the original size and randomly flips them left-right.
Args:
images: Training images to scale.
"""
scale = tf.random_uniform([1], minval=0.5, maxval=1.5, dtype=tf.float32, seed=None)
h_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(images)[1]), scale))
w_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(images)[2]), scale))
new_shape = tf.squeeze(tf.stack([h_new, w_new]), squeeze_dims=[1])
images = tf.image.resize_images(images, new_shape)
result = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), images)
return result
def random_crop_and_pad_image(images, crop_h, crop_w):
"""
Randomly crop and pads the input images.
Args:
images: Training images to crop/pad.
crop_h: Height of cropped segment.
crop_w: Width of cropped segment.
"""
image_shape = tf.shape(images)
image_pad = tf.image.pad_to_bounding_box(images, 0, 0, tf.maximum(crop_h, image_shape[1]), tf.maximum(crop_w, image_shape[2]))
img_crop = tf.map_fn(lambda img: tf.random_crop(img, [crop_h,crop_w,3]), image_pad)
return img_crop
def random_horizontal_flip(x):
"""
Randomly flip a batch of images horizontally
Args:
x Tensor of shape B x H x W x C
Returns:
random_flipped Randomly flipped tensor of shape B x H x W x C
"""
# Define random horizontal flip
flips = [(slice(None, None, None), slice(None, None, random.choice([-1, None])), slice(None, None, None))
for _ in xrange(x.shape[0])]
random_flipped = np.array([img[flip] for img, flip in zip(x, flips)])
return random_flipped
############################################################
### AWA dataset utils #####################################
############################################################
def _AWA_read_img_from_file(data_dir, file_name, img_height, img_width):
count = 0
imgs = []
labels = []
def dense_to_one_hot(labels_dense, num_classes=50):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
with open(file_name) as f:
for line in f:
img_name, img_label = line.split()
img_file = data_dir.rstrip('\/') + '/' + img_name
img = cv2.imread(img_file).astype(np.float32)
# HWC -> WHC, compatible with caffe weights
#img = np.transpose(img, [1, 0, 2])
img = cv2.resize(img, (img_width, img_height))
# Convert RGB to BGR
img_r, img_g, img_b = np.split(img, 3, axis=2)
img = np.concatenate((img_b, img_g, img_r), axis=2)
# Extract mean
img -= IMG_MEAN
imgs += [img]
labels += [int(img_label)]
count += 1
if count % 1000 == 0:
print 'Finish reading {:07d}'.format(count)
# Convert the labels to one-hot
y = dense_to_one_hot(np.array(labels))
return np.array(imgs), y
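# --- Illustrative sketch (not part of the original file) -------------------
# dense_to_one_hot() above fills the one-hot matrix through its flattened
# view: row i with class c maps to flat index i * num_classes + c. The snippet
# below shows the same trick on a tiny label vector (3 classes instead of 50).
import numpy as np
def _sketch_dense_to_one_hot(labels_dense, num_classes=3):
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
# Example: _sketch_dense_to_one_hot(np.array([2, 0, 1])) ->
# [[0, 0, 1], [1, 0, 0], [0, 1, 0]]
# ---------------------------------------------------------------------------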
def _AWA_get_data(data_dir, train_list_file, val_list_file, test_list_file, img_height, img_width):
""" Reads and parses examples from AWA dataset """
dataset = dict()
dataset['train'] = []
dataset['validation'] = []
dataset['test'] = []
num_val_img = 0 # Number of validation images; TODO: pass this as an argument
train_img = []
train_label = []
validation_img = []
validation_label = []
test_img = []
test_label = []
# Read train, validation and test files
train_img, train_label = _AWA_read_img_from_file(data_dir, train_list_file, img_height, img_width)
#validation_img, validation_label = _AWA_read_img_from_file(data_dir, val_list_file, img_height, img_width)
test_img, test_label = _AWA_read_img_from_file(data_dir, test_list_file, img_height, img_width)
dataset['train'].append(train_img)
dataset['train'].append(train_label)
#dataset['validation'].append(validation_img)
#dataset['validation'].append(validation_label)
dataset['test'].append(test_img)
dataset['test'].append(test_label)
return dataset
def construct_split_awa(task_labels, data_dir, train_list_file, val_list_file, test_list_file, img_height, img_width, attr_file=None):
"""
Construct Split AWA dataset
Args:
task_labels Labels of different tasks
data_dir Data directory from where the AWA dataset will be read
train_list_file File containing names of training images
val_list_file File containing names of val images
test_list_file File containing names of test images
img_height Height of image
img_width Width of image
attr_file File from where to load the attributes
"""
# Get the awa dataset
awa_data = _AWA_get_data(data_dir, train_list_file, val_list_file, test_list_file, img_height, img_width)
# Get the attribute vector
if attr_file:
with open(attr_file, 'rb') as f:
awa_attr = pickle.load(f)
# Define a list for storing the data for different tasks
datasets = []
# Data splits
#sets = ["train", "validation", "test"]
sets = ["train", "test"]
for task in task_labels:
for set_name in sets:
this_set = awa_data[set_name]
global_class_indices = np.column_stack(np.nonzero(this_set[1]))
count = 0
for cls in task:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] ==
cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
if set_name == "train":
train = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "validation":
validation = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "test":
test = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
awa = {
'train': train,
#'validation': validation,
'test': test,
}
datasets.append(awa)
if attr_file:
return datasets, awa_attr
else:
return datasets
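# --- Illustrative sketch (not part of the original file) -------------------
# The split construction above repeatedly uses
# np.column_stack(np.nonzero(one_hot_labels)) to get (row, class) index pairs
# and then keeps only the rows whose class belongs to the current task. The
# helper below isolates that index-selection step on a toy one-hot matrix.
import numpy as np
def _sketch_indices_for_classes(one_hot_labels, wanted_classes):
    """Return the sorted row indices whose label is in wanted_classes."""
    pairs = np.column_stack(np.nonzero(one_hot_labels))  # columns: (row, class)
    keep = np.concatenate([pairs[pairs[:, 1] == cls][:, 0] for cls in wanted_classes])
    return np.sort(keep, axis=None)
# Example: labels for samples [0, 2, 1, 2] one-hot over 3 classes, keeping class 2:
# _sketch_indices_for_classes(np.eye(3)[[0, 2, 1, 2]], [2]) -> array([1, 3])
# ---------------------------------------------------------------------------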
############################################################
### CUB dataset utils #####################################
############################################################
def _CUB_read_img_from_file(data_dir, file_name, img_height, img_width):
count = 0
imgs = []
labels = []
def dense_to_one_hot(labels_dense, num_classes=200):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
with open(file_name) as f:
for line in f:
img_name, img_label = line.split()
img_file = data_dir.rstrip('\/') + '/' + img_name
img = cv2.imread(img_file).astype(np.float32)
# HWC -> WHC, compatible with caffe weights
#img = np.transpose(img, [1, 0, 2])
img = cv2.resize(img, (img_width, img_height))
# Convert RGB to BGR
img_r, img_g, img_b = np.split(img, 3, axis=2)
img = np.concatenate((img_b, img_g, img_r), axis=2)
# Extract mean
img -= IMG_MEAN
imgs += [img]
labels += [int(img_label)]
count += 1
if count % 1000 == 0:
print 'Finish reading {:07d}'.format(count)
# Convert the labels to one-hot
y = dense_to_one_hot(np.array(labels))
return np.array(imgs), y
def _CUB_get_data(data_dir, train_list_file, test_list_file, img_height, img_width):
""" Reads and parses examples from CUB dataset """
dataset = dict()
dataset['train'] = []
dataset['test'] = []
num_val_img = 0 # Number of validation images; TODO: pass this as an argument
train_img = []
train_label = []
test_img = []
test_label = []
# Read train and test files
train_img, train_label = _CUB_read_img_from_file(data_dir, train_list_file, img_height, img_width)
test_img, test_label = _CUB_read_img_from_file(data_dir, test_list_file, img_height, img_width)
dataset['train'].append(train_img)
dataset['train'].append(train_label)
dataset['test'].append(test_img)
dataset['test'].append(test_label)
return dataset
def construct_split_cub(task_labels, data_dir, train_list_file, test_list_file, img_height, img_width, attr_file=None):
"""
Construct Split CUB-200 dataset
Args:
task_labels Labels of different tasks
data_dir Data directory from where the CUB-200 dataset will be read
train_list_file File containing names of training images
test_list_file File containing names of test images
img_height Height of image
img_width Width of image
attr_file File from where to load the attributes
"""
# Get the cub dataset
cub_data = _CUB_get_data(data_dir, train_list_file, test_list_file, img_height, img_width)
# Get the attribute vector
if attr_file:
with open(attr_file, 'rb') as f:
cub_attr = pickle.load(f)
# Define a list for storing the data for different tasks
datasets = []
# Data splits
sets = ["train", "test"]
for task in task_labels:
for set_name in sets:
this_set = cub_data[set_name]
global_class_indices = np.column_stack(np.nonzero(this_set[1]))
count = 0
for cls in task:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] ==
cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
if set_name == "train":
train = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "test":
test = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
cub = {
'train': train,
'test': test,
}
datasets.append(cub)
if attr_file:
return datasets, cub_attr
else:
return datasets
############################################################
### CIFAR download utils ###################################
############################################################
CIFAR_10_URL = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
CIFAR_100_URL = "http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
CIFAR_10_DIR = "/cifar_10"
CIFAR_100_DIR = "/cifar_100"
def construct_split_cifar(task_labels, is_cifar_100=True):
"""
Construct Split CIFAR-10 and CIFAR-100 datasets
Args:
task_labels Labels of different tasks
data_dir Data directory where the CIFAR data will be saved
"""
data_dir = 'CIFAR_data'
# Get the cifar dataset
cifar_data = _get_cifar(data_dir, is_cifar_100)
# Define a list for storing the data for different tasks
datasets = []
# Data splits
sets = ["train", "validation", "test"]
for task in task_labels:
for set_name in sets:
this_set = cifar_data[set_name]
global_class_indices = np.column_stack(np.nonzero(this_set[1]))
count = 0
for cls in task:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] ==
cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
if set_name == "train":
train = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "validation":
validation = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "test":
test = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
cifar = {
'train': train,
'validation': validation,
'test': test,
}
datasets.append(cifar)
return datasets
def _get_cifar(data_dir, is_cifar_100):
"""
Get the CIFAR-10 and CIFAR-100 datasets
Args:
data_dir Directory where the downloaded data will be stored
"""
x_train = None
y_train = None
x_validation = None
y_validation = None
x_test = None
y_test = None
l = None
# Download the dataset if needed
_cifar_maybe_download_and_extract(data_dir)
# Dictionary to store the dataset
dataset = dict()
dataset['train'] = []
dataset['validation'] = []
dataset['test'] = []
def dense_to_one_hot(labels_dense, num_classes=100):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
if is_cifar_100:
# Load the training data of CIFAR-100
f = open(data_dir + CIFAR_100_DIR + '/train', 'rb')
datadict = pickle.load(f)
f.close()
_X = datadict['data']
_Y = np.array(datadict['fine_labels'])
_Y = dense_to_one_hot(_Y, num_classes=100)
_X = np.array(_X, dtype=float) / 255.0
_X = _X.reshape([-1, 3, 32, 32])
_X = _X.transpose([0, 2, 3, 1])
# Compute the data mean for normalization
x_train_mean = np.mean(_X, axis=0)
x_train = _X[:40000]
y_train = _Y[:40000]
x_validation = _X[40000:]
y_validation = _Y[40000:]
else:
# Load all the training batches of the CIFAR-10
for i in range(5):
f = open(data_dir + CIFAR_10_DIR + '/data_batch_' + str(i + 1), 'rb')
datadict = pickle.load(f)
f.close()
_X = datadict['data']
_Y = np.array(datadict['labels'])
_Y = dense_to_one_hot(_Y, num_classes=10)
_X = np.array(_X, dtype=float) / 255.0
_X = _X.reshape([-1, 3, 32, 32])
_X = _X.transpose([0, 2, 3, 1])
if x_train is None:
x_train = _X
y_train = _Y
else:
x_train = np.concatenate((x_train, _X), axis=0)
y_train = np.concatenate((y_train, _Y), axis=0)
# Compute the data mean for normalization
x_train_mean = np.mean(x_train, axis=0)
x_validation = x_train[40000:] # We don't use a validation set with CIFAR-10
y_validation = y_train[40000:]
# Normalize the train and validation sets
x_train -= x_train_mean
x_validation -= x_train_mean
dataset['train'].append(x_train)
dataset['train'].append(y_train)
dataset['train'].append(l)
dataset['validation'].append(x_validation)
dataset['validation'].append(y_validation)
dataset['validation'].append(l)
if is_cifar_100:
# Load the test batch of CIFAR-100
f = open(data_dir + CIFAR_100_DIR + '/test', 'rb')
datadict = pickle.load(f)
f.close()
_X = datadict['data']
_Y = np.array(datadict['fine_labels'])
_Y = dense_to_one_hot(_Y, num_classes=100)
else:
# Load the test batch of CIFAR-10
f = open(data_dir + CIFAR_10_DIR + '/test_batch', 'rb')
datadict = pickle.load(f)
f.close()
_X = datadict["data"]
_Y = np.array(datadict['labels'])
_Y = dense_to_one_hot(_Y, num_classes=10)
_X = np.array(_X, dtype=float) / 255.0
_X = _X.reshape([-1, 3, 32, 32])
_X = _X.transpose([0, 2, 3, 1])
x_test = _X
y_test = _Y
# Normalize the test set
x_test -= x_train_mean
dataset['test'].append(x_test)
dataset['test'].append(y_test)
dataset['test'].append(l)
return dataset
def _print_download_progress(count, block_size, total_size):
"""
Show the download progress of the cifar data
"""
pct_complete = float(count * block_size) / total_size
msg = "\r- Download progress: {0:.1%}".format(pct_complete)
sys.stdout.write(msg)
sys.stdout.flush()
def _cifar_maybe_download_and_extract(data_dir):
"""
Routine to download and extract the cifar dataset
Args:
data_dir Directory where the downloaded data will be stored
"""
cifar_10_directory = data_dir + CIFAR_10_DIR
cifar_100_directory = data_dir + CIFAR_100_DIR
# If the data_dir does not exist, create the directory and download
# the data
if not os.path.exists(data_dir):
os.makedirs(data_dir)
url = CIFAR_10_URL
filename = url.split('/')[-1]
file_path = os.path.join(data_dir, filename)
zip_cifar_10 = file_path
file_path, _ = urlretrieve(url=url, filename=file_path, reporthook=_print_download_progress)
print()
print("Download finished. Extracting files.")
if file_path.endswith(".zip"):
zipfile.ZipFile(file=file_path, mode="r").extractall(data_dir)
elif file_path.endswith((".tar.gz", ".tgz")):
tarfile.open(name=file_path, mode="r:gz").extractall(data_dir)
print("Done.")
url = CIFAR_100_URL
filename = url.split('/')[-1]
file_path = os.path.join(data_dir, filename)
zip_cifar_100 = file_path
file_path, _ = urlretrieve(url=url, filename=file_path, reporthook=_print_download_progress)
print()
print("Download finished. Extracting files.")
if file_path.endswith(".zip"):
zipfile.ZipFile(file=file_path, mode="r").extractall(data_dir)
elif file_path.endswith((".tar.gz", ".tgz")):
tarfile.open(name=file_path, mode="r:gz").extractall(data_dir)
print("Done.")
os.rename(data_dir + "/cifar-10-batches-py", cifar_10_directory)
os.rename(data_dir + "/cifar-100-python", cifar_100_directory)
os.remove(zip_cifar_10)
os.remove(zip_cifar_100)
#########################################
## MNIST Utils ##########################
#########################################
def reformat_mnist(datasets):
"""
Routine to reformat the MNIST dataset into a 3D tensor
"""
image_size = 28 # Height of MNIST dataset
num_channels = 1 # Gray scale
for i in range(len(datasets)):
sets = ["train", "validation", "test"]
for set_name in sets:
datasets[i]['%s'%set_name]['images'] = datasets[i]['%s'%set_name]['images'].reshape\
((-1, image_size, image_size, num_channels)).astype(np.float32)
return datasets
def construct_permute_mnist(num_tasks):
"""
Construct a dataset of permuted MNIST images
Args:
num_tasks Number of tasks
Returns
dataset A permuted MNIST dataset
"""
# Download and store mnist dataset
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
datasets = []
for i in range(num_tasks):
perm_inds = range(mnist.train.images.shape[1])
np.random.shuffle(perm_inds)
copied_mnist = deepcopy(mnist)
sets = ["train", "validation", "test"]
for set_name in sets:
this_set = getattr(copied_mnist, set_name) # shallow copy
this_set._images = np.transpose(np.array([this_set.images[:,c] for c in perm_inds]))
if set_name == "train":
train = {
'images':this_set._images,
'labels':this_set.labels,
}
elif set_name == "validation":
validation = {
'images':this_set._images,
'labels':this_set.labels,
}
elif set_name == "test":
test = {
'images':this_set._images,
'labels':this_set.labels,
}
dataset = {
'train': train,
'validation': validation,
'test': test,
}
datasets.append(dataset)
return datasets
def construct_split_mnist(task_labels):
"""
Construct a split mnist dataset
Args:
task_labels List of split labels
Returns:
dataset A list of split datasets
"""
# Download and store mnist dataset
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
datasets = []
sets = ["train", "validation", "test"]
for task in task_labels:
for set_name in sets:
this_set = getattr(mnist, set_name)
global_class_indices = np.column_stack(np.nonzero(this_set.labels))
count = 0
for cls in task:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
if set_name == "train":
train = {
'images':deepcopy(mnist.train.images[class_indices, :]),
'labels':deepcopy(mnist.train.labels[class_indices, :]),
}
elif set_name == "validation":
validation = {
'images':deepcopy(mnist.validation.images[class_indices, :]),
'labels':deepcopy(mnist.validation.labels[class_indices, :]),
}
elif set_name == "test":
test = {
'images':deepcopy(mnist.test.images[class_indices, :]),
'labels':deepcopy(mnist.test.labels[class_indices, :]),
}
mnist2 = {
'train': train,
'validation': validation,
'test': test,
}
datasets.append(mnist2)
return datasets
###################################################
###### ImageNet Utils #############################
###################################################
def construct_split_imagenet(task_labels, data_dir):
"""
Construct Split ImageNet dataset
Args:
task_labels Labels of different tasks
data_dir Data directory from where to load the imagenet data
"""
# Load the imagenet dataset
imagenet_data = _load_imagenet(data_dir)
# Define a list for storing the data for different tasks
datasets = []
# Data splits
sets = ["train", "test"]
for task in task_labels:
for set_name in sets:
this_set = imagenet_data[set_name]
global_class_indices = np.column_stack(np.nonzero(this_set[1]))
count = 0
for cls in task:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] ==
cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] ==\
cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
if set_name == "train":
train = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
elif set_name == "test":
test = {
'images':deepcopy(this_set[0][class_indices, :]),
'labels':deepcopy(this_set[1][class_indices, :]),
}
imagenet = {
'train': train,
'test': test,
}
datasets.append(imagenet)
return datasets
def _load_imagenet(data_dir):
"""
Load the ImageNet data
Args:
data_dir Directory where the pickle files have been dumped
"""
x_train = None
y_train = None
x_test = None
y_test = None
# Dictionary to store the dataset
dataset = dict()
dataset['train'] = []
dataset['test'] = []
def dense_to_one_hot(labels_dense, num_classes=100):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
# Load the training batches
for i in range(4):
f = open(data_dir + '/train_batch_' + str(i), 'rb')
datadict = pickle.load(f)
f.close()
_X = datadict['data']
_Y = np.array(datadict['labels'])
# Convert the labels to one-hot
_Y = dense_to_one_hot(_Y)
# Normalize the images
_X = np.array(_X, dtype=float)/ 255.0
_X = _X.reshape([-1, 224, 224, 3])
if x_train is None:
x_train = _X
y_train = _Y
else:
x_train = np.concatenate((x_train, _X), axis=0)
y_train = np.concatenate((y_train, _Y), axis=0)
dataset['train'].append(x_train)
dataset['train'].append(y_train)
# Load test batches
for i in range(4):
f = open(data_dir + '/test_batch_' + str(i), 'rb')
datadict = pickle.load(f)
f.close()
_X = datadict['data']
_Y = np.array(datadict['labels'])
# Convert the labels to one-hot
_Y = dense_to_one_hot(_Y)
# Normalize the images
_X = np.array(_X, dtype=float)/ 255.0
_X = _X.reshape([-1, 224, 224, 3])
if x_test is None:
x_test = _X
y_test = _Y
else:
x_test = np.concatenate((x_test, _X), axis=0)
y_test = np.concatenate((y_test, _Y), axis=0)
dataset['test'].append(x_test)
dataset['test'].append(y_test)
return dataset
|
agem-main
|
utils/data_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Define some utility functions
"""
import numpy as np
import tensorflow as tf
def clone_variable_list(variable_list):
"""
Clone the variable list
"""
return [tf.identity(var) for var in variable_list]
def create_fc_layer(input, w, b, apply_relu=True):
"""
Construct a Fully Connected layer
Args:
w Weights
b Biases
apply_relu Apply relu (T/F)?
Returns:
Output of an FC layer
"""
with tf.name_scope('fc_layer'):
output = tf.matmul(input, w) + b
# Apply relu
if apply_relu:
output = tf.nn.relu(output)
return output
def create_conv_layer(input, w, b, stride=1, apply_relu=True):
"""
Construct a convolutional layer
Args:
w Weights
b Biases
stride Stride of the convolution (default 1)
apply_relu Apply relu (T/F)?
Returns:
Output of a conv layer
"""
with tf.name_scope('conv_layer'):
# Do the convolution operation
output = tf.nn.conv2d(input, w, [1, stride, stride, 1], padding='SAME') + b
# Apply relu
if apply_relu:
output = tf.nn.relu(output)
return output
def load_task_specific_data_in_proportion(datasets, task_labels, classes_appearing_in_tasks, class_seen_already):
"""
Loads task specific data from the datasets proportionate to classes appearing in different tasks
"""
global_class_indices = np.column_stack(np.nonzero(datasets['labels']))
count = 0
for cls in task_labels:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
total_class_instances = class_indices.size
num_instances_to_choose = total_class_instances // classes_appearing_in_tasks[cls]
offset = (class_seen_already[cls] - 1) * num_instances_to_choose
final_class_indices = class_indices[offset: offset+num_instances_to_choose]
else:
current_class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
total_class_instances = current_class_indices.size
num_instances_to_choose = total_class_instances // classes_appearing_in_tasks[cls]
offset = (class_seen_already[cls] - 1) * num_instances_to_choose
final_class_indices = np.append(final_class_indices, current_class_indices[offset: offset+num_instances_to_choose])
count += 1
final_class_indices = np.sort(final_class_indices, axis=None)
return datasets['images'][final_class_indices, :], datasets['labels'][final_class_indices, :]
def load_task_specific_data(datasets, task_labels):
"""
Loads task specific data from the datasets
"""
global_class_indices = np.column_stack(np.nonzero(datasets['labels']))
count = 0
for cls in task_labels:
if count == 0:
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])])
else:
class_indices = np.append(class_indices, np.squeeze(global_class_indices[global_class_indices[:,1] == cls][:,np.array([True, False])]))
count += 1
class_indices = np.sort(class_indices, axis=None)
return datasets['images'][class_indices, :], datasets['labels'][class_indices, :]
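# Illustrative sketch (not part of the original repo): calling load_task_specific_data
# above on a toy split. The dict layout ('images', 'labels' with one-hot rows) mirrors
# the splits produced by the construct_split_* helpers.
def _example_load_task_specific_data():
    toy_dataset = {
        'images': np.arange(12, dtype=np.float32).reshape(6, 2),
        'labels': np.eye(3, dtype=np.float32)[[0, 1, 2, 0, 1, 2]],
    }
    task_images, task_labels = load_task_specific_data(toy_dataset, task_labels=[0, 2])
    # The 4 rows whose one-hot label is class 0 or class 2 are returned.
    return task_images, task_labels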
def samples_for_each_class(dataset_labels, task):
"""
Number of samples for each class in the task
Args:
dataset_labels Labels to count samples from
task Labels within a task
Returns
"""
num_samples = np.zeros([len(task)], dtype=np.float32)
i = 0
for label in task:
global_class_indices = np.column_stack(np.nonzero(dataset_labels))
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
num_samples[i] = len(class_indices)
i += 1
return num_samples
def get_sample_weights(labels, tasks):
weights = np.zeros([labels.shape[0]], dtype=np.float32)
for label in tasks:
global_class_indices = np.column_stack(np.nonzero(labels))
class_indices = np.array(np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])]))
total_class_samples = class_indices.shape[0]
weights[class_indices] = 1.0/ total_class_samples
# Rescale the weights so that the minimum weight is 1. The most frequent
# class gets weight 1 and rarer classes get proportionally larger weights.
weights /= weights.min()
return weights
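# Illustrative sketch (not part of the original repo): get_sample_weights above on an
# imbalanced toy task. Each example is weighted by the inverse of its class count and
# the weights are rescaled so the most frequent class has weight 1.
def _example_get_sample_weights():
    toy_labels = np.eye(2, dtype=np.float32)[[0, 0, 0, 1]]  # three class-0 examples, one class-1
    weights = get_sample_weights(toy_labels, tasks=[0, 1])
    # weights == [1., 1., 1., 3.]: per-class weights 1/3 and 1, rescaled by the minimum (1/3)
    return weights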
def update_episodic_memory_with_less_data(task_dataset, importance_array, total_mem_size, task, episodic_images, episodic_labels, task_labels=None, is_herding=False):
"""
Update the episodic memory when the task data is less than the memory size
Args:
Returns:
"""
num_examples_in_task = task_dataset['images'].shape[0]
# Empty spaces in the episodic memory
empty_spaces = np.sum(np.sum(episodic_labels, axis=1) == 0)
if empty_spaces >= num_examples_in_task:
# Find where the empty spaces are in order
empty_indices = np.where(np.sum(episodic_labels, axis=1) == 0)[0]
# Store the whole task data in the episodic memory
episodic_images[empty_indices[:num_examples_in_task]] = task_dataset['images']
episodic_labels[empty_indices[:num_examples_in_task]] = task_dataset['labels']
elif empty_spaces == 0:
# Compute the amount of space in the episodic memory for the new task
space_for_new_task = total_mem_size// (task + 1) # task 0, 1, ...
# Get the indices to update in the episodic memory
eps_mem_indices = np.random.choice(total_mem_size, space_for_new_task, replace=False) # Sample without replacement
# Get the indices of important samples from the task dataset
label_importance = importance_array + 1e-32
label_importance /= np.sum(label_importance) # Convert to a probability distribution
task_mem_indices = np.random.choice(num_examples_in_task, space_for_new_task, p=label_importance, replace=False) # Sample without replacement
# Update the episodic memory
episodic_images[eps_mem_indices] = task_dataset['images'][task_mem_indices]
episodic_labels[eps_mem_indices] = task_dataset['labels'][task_mem_indices]
else:
# When there is some free space but not enough to store the whole task
# Find where the empty spaces are in order
empty_indices = np.where(np.sum(episodic_labels, axis=1) == 0)[0]
# Store some of the examples from task in the memory
episodic_images[empty_indices] = task_dataset['images'][:len(empty_indices)]
episodic_labels[empty_indices] = task_dataset['labels'][:len(empty_indices)]
# Adjust the remaining samples in the episodic memory
space_for_new_task = (total_mem_size // (task + 1)) - len(empty_indices) # task 0, 1, ...
# Get the indices to update in the episodic memory
eps_mem_indices = np.random.choice((total_mem_size - len(empty_indices)), space_for_new_task, replace=False) # Sample without replacement
# Get the indices of important samples from the task dataset
label_importance = importance_array[len(empty_indices):] + 1e-32
label_importance /= np.sum(label_importance) # Convert to a probability distribution
updated_num_examples_in_task = num_examples_in_task - len(empty_indices)
task_mem_indices = np.random.choice(updated_num_examples_in_task, space_for_new_task, p=label_importance, replace=False) # Sample without replacement
task_mem_indices += len(empty_indices) # Add the offset
# Update the episodic memory
episodic_images[eps_mem_indices] = task_dataset['images'][task_mem_indices]
episodic_labels[eps_mem_indices] = task_dataset['labels'][task_mem_indices]
def update_episodic_memory(task_dataset, importance_array, total_mem_size, task, episodic_images, episodic_labels, task_labels=None, is_herding=False):
"""
Update the episodic memory with new task data
Args:
Returns:
"""
num_examples_in_task = task_dataset['images'].shape[0]
# Compute the amount of space in the episodic memory for the new task
space_for_new_task = total_mem_size// (task + 1) # task 0, 1, ...
# Get the indices to update in the episodic memory
eps_mem_indices = np.random.choice(total_mem_size, space_for_new_task, replace=False) # Sample without replacement
if is_herding and task_labels is not None:
# Get the samples based on herding
imp_images, imp_labels = sample_from_dataset_icarl(task_dataset, importance_array, task_labels, space_for_new_task//len(task_labels))
episodic_images[eps_mem_indices[np.arange(imp_images.shape[0])]] = imp_images
episodic_labels[eps_mem_indices[np.arange(imp_images.shape[0])]] = imp_labels
else:
# Get the indices of important samples from the task dataset
label_importance = importance_array + 1e-32
label_importance /= np.sum(label_importance) # Convert to a probability distribution
task_mem_indices = np.random.choice(num_examples_in_task, space_for_new_task, p=label_importance, replace=False) # Sample without replacement
# Update the episodic memory
episodic_images[eps_mem_indices] = task_dataset['images'][task_mem_indices]
episodic_labels[eps_mem_indices] = task_dataset['labels'][task_mem_indices]
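# Illustrative sketch (not part of the original repo): the per-task share of the episodic
# memory used by the two update_* functions above shrinks as total_mem_size // (task + 1),
# e.g. 120 slots are overwritten for task 0, then 60, 40 and 30 for tasks 1, 2 and 3.
def _example_episodic_memory_quota(total_mem_size=120, num_tasks=4):
    return [total_mem_size // (task + 1) for task in range(num_tasks)]  # [120, 60, 40, 30]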
def sample_from_dataset(dataset, importance_array, task, samples_count, preds=None):
"""
Samples from a dataset based on a probability distribution
Args:
dataset Dataset to sample from
importance_array Importance scores (need not be a probability distribution)
task Labels within a task
samples_count Number of samples to return
Return:
images Important images
labels Important labels
"""
count = 0
# For each label in the task extract the important samples
for label in task:
global_class_indices = np.column_stack(np.nonzero(dataset['labels']))
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
if (preds is not None):
# Find the indices where prediction match the correct label
pred_indices = np.where(preds == label)[0]
# Find the correct prediction indices
correct_pred_indices = np.intersect1d(pred_indices, class_indices)
else:
correct_pred_indices = class_indices
# Extract the importance for the label
label_importance = importance_array[correct_pred_indices] + 1e-32
label_importance /= np.sum(label_importance)
actual_samples_count = min(samples_count, np.count_nonzero(label_importance))
#print('Storing {} samples from {} class'.format(actual_samples_count, label))
# If no samples are correctly classified then skip saving the samples
if (actual_samples_count != 0):
# Extract the important indices
imp_indices = np.random.choice(correct_pred_indices, actual_samples_count, p=label_importance, replace=False)
if count == 0:
images = dataset['images'][imp_indices]
labels = dataset['labels'][imp_indices]
else:
images = np.vstack((images, dataset['images'][imp_indices]))
labels = np.vstack((labels, dataset['labels'][imp_indices]))
count += 1
if count != 0:
return images, labels
else:
return None, None
def concatenate_datasets(current_images, current_labels, prev_images, prev_labels):
"""
Concatenates the current dataset with the previous ones. This will be used for
adding important samples from the previous datasets
Args:
current_images Images of current dataset
current_labels Labels of current dataset
prev_images List containing images of previous datasets
prev_labels List containing labels of previous datasets
Returns:
images Concatenated images
labels Concatenated labels
"""
"""
images = current_images
labels = current_labels
for i in range(len(prev_images)):
images = np.vstack((images, prev_images[i]))
labels = np.vstack((labels, prev_labels[i]))
"""
images = np.concatenate((current_images, prev_images), axis=0)
labels = np.concatenate((current_labels, prev_labels), axis=0)
return images, labels
def sample_from_dataset_icarl(dataset, features, task, samples_count, preds=None):
"""
Samples from a dataset based on iCaRL-style herding (mean of features)
Args:
dataset Dataset to sample from
features Features - activation before the last layer
task Labels within a task
samples_count Number of samples to return
Return:
images Important images
labels Important labels
"""
print('Herding based sampling!')
#samples_count = min(samples_count, dataset['images'].shape[0])
count = 0
# For each label in the task extract the important samples
for label in task:
global_class_indices = np.column_stack(np.nonzero(dataset['labels']))
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
if (preds is not None):
# Find the indices where prediction match the correct label
pred_indices = np.where(preds == label)[0]
# Find the correct prediction indices
correct_pred_indices = np.intersect1d(pred_indices, class_indices)
else:
correct_pred_indices = class_indices
mean_feature = np.mean(features[correct_pred_indices, :], axis=0)
actual_samples_count = min(samples_count, len(correct_pred_indices))
# If no samples are correctly classified then skip saving the samples
imp_indices = np.zeros(actual_samples_count, dtype=np.int32)
sample_sum= np.zeros(mean_feature.shape)
if (actual_samples_count != 0):
# Extract the important indices
for i in range(actual_samples_count):
sample_mean = (features[correct_pred_indices, :] +
np.tile(sample_sum, [len(correct_pred_indices),1]))/ float(i + 1)
norm_distance = np.linalg.norm((np.tile(mean_feature, [len(correct_pred_indices),1])
- sample_mean), ord=2, axis=1)
imp_indices[i] = correct_pred_indices[np.argmin(norm_distance)]
sample_sum = sample_sum + features[imp_indices[i], :]
if count == 0:
images = dataset['images'][imp_indices]
labels = dataset['labels'][imp_indices]
else:
images = np.vstack((images, dataset['images'][imp_indices]))
labels = np.vstack((labels, dataset['labels'][imp_indices]))
count += 1
if count != 0:
return images, labels
else:
return None, None
def average_acc_stats_across_runs(data, key):
"""
Compute the average accuracy statistics (mean and std) across runs
"""
num_runs = data.shape[0]
avg_acc = np.zeros(num_runs)
for i in range(num_runs):
avg_acc[i] = np.mean(data[i][-1])
return avg_acc.mean()*100, avg_acc.std()*100
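# Illustrative sketch (not part of the original repo): average_acc_stats_across_runs above
# on two toy runs, each a TxT matrix whose row t holds the accuracies after training task t.
# The 'key' argument is unused by the helper, so None is passed here.
def _example_average_acc_stats():
    run_1 = np.array([[0.9, 0.0], [0.8, 0.7]])
    run_2 = np.array([[0.8, 0.0], [0.6, 0.9]])
    mean_pct, std_pct = average_acc_stats_across_runs(np.stack([run_1, run_2]), key=None)
    # Final-row means are 0.75 for both runs -> (75.0, 0.0)
    return mean_pct, std_pct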
def average_fgt_stats_across_runs(data, key):
"""
Compute the forgetting statistics (mean and std) across runs
"""
num_runs = data.shape[0]
fgt = np.zeros(num_runs)
wst_fgt = np.zeros(num_runs)
for i in range(num_runs):
fgt[i] = compute_fgt(data[i])
return fgt.mean(), fgt.std()
def compute_fgt(data):
"""
Given a TxT data matrix, compute average forgetting at T-th task
"""
num_tasks = data.shape[0]
T = num_tasks - 1
fgt = 0.0
for i in range(T):
fgt += np.max(data[:T,i]) - data[T, i]
avg_fgt = fgt/ float(num_tasks - 1)
return avg_fgt
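# Illustrative sketch (not part of the original repo): compute_fgt above on a toy 3x3
# accuracy matrix where entry [t, i] is the accuracy on task i after training on task t.
# Forgetting of a task is its best earlier accuracy minus its final accuracy.
def _example_compute_fgt():
    toy_acc = np.array([[0.9, 0.0, 0.0],
                        [0.7, 0.8, 0.0],
                        [0.6, 0.5, 0.9]])
    # Task 0: 0.9 - 0.6 = 0.3; task 1: 0.8 - 0.5 = 0.3; average forgetting = 0.3
    return compute_fgt(toy_acc)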
def update_reservior(current_image, current_label, episodic_images, episodic_labels, M, N):
"""
Update the episodic memory with the current example using reservoir sampling
"""
if M > N:
episodic_images[N] = current_image
episodic_labels[N] = current_label
else:
j = np.random.randint(0, N)
if j < M:
episodic_images[j] = current_image
episodic_labels[j] = current_label
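# Illustrative sketch (not part of the original repo): feeding a small stream through
# update_reservior above. M is the memory size and N the number of examples seen so far;
# once the memory is full, each new example overwrites a random slot with probability M/N.
def _example_reservoir_stream(M=5, stream_length=20):
    episodic_images = np.zeros((M, 2))
    episodic_labels = np.zeros((M, 3))
    for N in range(stream_length):
        current_image = np.full(2, N, dtype=np.float32)
        current_label = np.eye(3)[N % 3]
        update_reservior(current_image, current_label, episodic_images, episodic_labels, M, N)
    return episodic_images, episodic_labels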
|
agem-main
|
utils/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Define some utility functions
"""
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.colors as colors
import matplotlib.cm as cmx
import matplotlib.pyplot as plt
import matplotlib.figure as figure
from six.moves import cPickle as pickle
def snapshot_experiment_eval(logdir, experiment_id, data):
"""
Store the output of the experiment in a file
"""
snapshot_file = logdir + '/' + experiment_id + '.pickle'
with open(snapshot_file, 'wb') as f:
pickle.dump(data, f)
print('Experimental Eval has been snapshotted to %s!'%(snapshot_file))
def snapshot_task_labels(logdir, experiment_id, data):
"""
Store the task labels of the experiment in a file
"""
snapshot_file = logdir + '/' + experiment_id + '_task_labels.pickle'
with open(snapshot_file, 'wb') as f:
pickle.dump(data, f)
print('Experimental Eval has been snapshotted to %s!'%(snapshot_file))
def snapshot_experiment_meta_data(logdir, experiment_id, exper_meta_data):
"""
Store the meta-data of the experiment in a file
"""
meta_file = logdir + '/' + experiment_id + '.txt'
with open(meta_file, 'wb') as f:
for key in exper_meta_data:
print('{}: {}'.format(key, exper_meta_data[key]))
f.write('{}:{} \n'.format(key, exper_meta_data[key]))
print('Experimental meta-data has been snapshotted to %s!'%(meta_file))
def plot_acc_multiple_runs(data, task_labels, valid_measures, n_stats, plot_name=None):
"""
Plots the accuracies
Args:
task_labels List of tasks
n_stats Number of runs
plot_name Name of the file where the plot will be saved
Returns:
"""
n_tasks = len(task_labels)
plt.figure(figsize=(14, 3))
axs = [plt.subplot(1,n_tasks+1,1)]
for i in range(1, n_tasks + 1):
axs.append(plt.subplot(1, n_tasks+1, i+1, sharex=axs[0], sharey=axs[0]))
fmt_chars = ['o', 's', 'd']
fmts = []
for i in range(len(valid_measures)):
fmts.append(fmt_chars[i%len(fmt_chars)])
plot_keys = sorted(data['mean'].keys())
for k, cval in enumerate(plot_keys):
label = "c=%g"%cval
mean_vals = data['mean'][cval]
std_vals = data['std'][cval]
for j in range(n_tasks+1):
plt.sca(axs[j])
errorbar_kwargs = dict(fmt="%s-"%fmts[k], markersize=5)
if j < n_tasks:
norm= np.sqrt(n_stats) # np.sqrt(n_stats) for SEM or 1 for STDEV
axs[j].errorbar(np.arange(n_tasks)+1, mean_vals[:, j], yerr=std_vals[:, j]/norm, label=label, **errorbar_kwargs)
else:
mean_stuff = []
std_stuff = []
for i in range(len(data['mean'][cval])):
mean_stuff.append(data['mean'][cval][i][:i+1].mean())
std_stuff.append(np.sqrt((data['std'][cval][i][:i+1]**2).sum())/(n_stats*np.sqrt(n_stats)))
plt.errorbar(range(1,n_tasks+1), mean_stuff, yerr=std_stuff, label="%s"%valid_measures[k], **errorbar_kwargs)
plt.xticks(np.arange(n_tasks)+1)
plt.xlim((1.0,5.5))
"""
# Uncomment this if clutter along y-axis needs to be removed
if j == 0:
axs[j].set_yticks([0.5,1])
else:
plt.setp(axs[j].get_yticklabels(), visible=False)
plt.ylim((0.45,1.1))
"""
for i, ax in enumerate(axs):
if i < n_tasks:
ax.set_title((['Task %d (%d to %d)'%(j+1,task_labels[j][0], task_labels[j][-1])\
for j in range(n_tasks)] + ['average'])[i], fontsize=8)
else:
ax.set_title("Average", fontsize=8)
ax.axhline(0.5, color='k', linestyle=':', label="chance", zorder=0)
handles, labels = axs[-1].get_legend_handles_labels()
# Reorder legend so chance is last
axs[-1].legend([handles[j] for j in [i for i in range(len(valid_measures)+1)]],
[labels[j] for j in [i for i in range(len(valid_measures)+1)]], loc='best', fontsize=6)
axs[0].set_xlabel("Tasks")
axs[0].set_ylabel("Accuracy")
plt.gcf().tight_layout()
plt.grid('on')
if plot_name == None:
plt.show()
else:
plt.savefig(plot_name)
def plot_histogram(data, n_bins=10, plot_name='my_hist'):
plt.hist(data, bins=n_bins)
plt.savefig(plot_name)
plt.close()
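# Illustrative sketch (not part of the original repo): minimal usage of the helpers above.
# The '/tmp' log directory, 'demo_run' experiment id and output file name are placeholders.
def _example_vis_utils_usage():
    toy_results = {'mean': np.random.rand(3, 3), 'std': np.random.rand(3, 3)}
    snapshot_experiment_eval('/tmp', 'demo_run', toy_results)
    plot_histogram(np.random.randn(1000), n_bins=20, plot_name='/tmp/demo_hist.png')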
|
agem-main
|
utils/vis_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .model import Model
|
agem-main
|
model/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Model definition
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
from utils import clone_variable_list, create_fc_layer, create_conv_layer
from utils.resnet_utils import _conv, _fc, _bn, _residual_block, _residual_block_first
from utils.vgg_utils import vgg_conv_layer, vgg_fc_layer
PARAM_XI_STEP = 1e-3
NEG_INF = -1e32
EPSILON = 1e-32
HYBRID_ALPHA = 0.5
TRAIN_ENTROPY_BASED_SUM = False
def weight_variable(shape, name='fc', init_type='default'):
"""
Define weight variables
Args:
shape Shape of the bias variable tensor
Returns:
A tensor of size shape initialized from a random normal
"""
with tf.variable_scope(name):
if init_type == 'default':
weights = tf.get_variable('weights', shape, tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.1))
#weights = tf.Variable(tf.truncated_normal(shape, stddev=0.1), name='weights')
elif init_type == 'zero':
weights = tf.get_variable('weights', shape, tf.float32, initializer=tf.constant_initializer(0.1))
#weights = tf.Variable(tf.constant(0.1, shape=shape, dtype=np.float32), name='weights')
return weights
def bias_variable(shape, name='fc'):
"""
Define bias variables
Args:
shape Shape of the bias variable tensor
Returns:
A tensor of size shape initialized from a constant
"""
with tf.variable_scope(name):
biases = tf.get_variable('biases', shape, initializer=tf.constant_initializer(0.1))
return biases
#return tf.Variable(tf.constant(0.1, shape=shape, dtype=np.float32), name='biases') #TODO: Should we initialize it from 0
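# Illustrative sketch (not part of the original repo): wiring weight_variable and
# bias_variable above into a single hidden layer via create_fc_layer, assuming a
# TensorFlow 1.x graph-mode environment like the rest of this file. The 'demo_*'
# names and the dimensions are placeholders.
def _example_fc_layer(in_dim=784, hidden_dim=256):
    x = tf.placeholder(tf.float32, [None, in_dim], name='demo_input')
    w = weight_variable([in_dim, hidden_dim], name='demo_fc')
    b = bias_variable([hidden_dim], name='demo_fc')
    return create_fc_layer(x, w, b, apply_relu=True)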
class Model:
"""
A class defining the model
"""
def __init__(self, x_train, y_, num_tasks, opt, imp_method, synap_stgth, fisher_update_after, fisher_ema_decay, network_arch='FC-S',
is_ATT_DATASET=False, x_test=None, attr=None):
"""
Instantiate the model
"""
# Define some placeholders which are used to feed the data to the model
self.y_ = y_
if imp_method == 'PNN':
self.train_phase = []
self.total_classes = int(self.y_[0].get_shape()[1])
self.train_phase = [tf.placeholder(tf.bool, name='train_phase_%d'%(i)) for i in range(num_tasks)]
self.output_mask = [tf.placeholder(dtype=tf.float32, shape=[self.total_classes]) for i in range(num_tasks)]
else:
self.total_classes = int(self.y_.get_shape()[1])
self.train_phase = tf.placeholder(tf.bool, name='train_phase')
if (imp_method == 'A-GEM' or imp_method == 'ER') and 'FC-' not in network_arch: # Only for Split-X setups
self.output_mask = [tf.placeholder(dtype=tf.float32, shape=[self.total_classes]) for i in range(num_tasks)]
self.mem_batch_size = tf.placeholder(dtype=tf.float32, shape=())
else:
self.output_mask = tf.placeholder(dtype=tf.float32, shape=[self.total_classes])
self.sample_weights = tf.placeholder(tf.float32, shape=[None])
self.task_id = tf.placeholder(dtype=tf.int32, shape=())
self.store_grad_batches = tf.placeholder(dtype=tf.float32, shape=())
self.keep_prob = tf.placeholder(dtype=tf.float32, shape=())
self.train_samples = tf.placeholder(dtype=tf.float32, shape=())
self.training_iters = tf.placeholder(dtype=tf.float32, shape=())
self.train_step = tf.placeholder(dtype=tf.float32, shape=())
self.violation_count = tf.Variable(0, dtype=tf.float32, trainable=False)
self.is_ATT_DATASET = is_ATT_DATASET # To use a different (standard one) ResNet-18 for CUB
if x_test is not None:
# If CUB dataset then use augmented x (x_train) for training and non-augmented x (x_test) for testing
self.x = tf.cond(self.train_phase, lambda: tf.identity(x_train), lambda: tf.identity(x_test))
train_shape = x_train.get_shape().as_list()
x = tf.reshape(self.x, [-1, train_shape[1], train_shape[2], train_shape[3]])
else:
# We don't use data augmentation for other datasets
self.x = x_train
x = self.x
# Class attributes for zero shot transfer
self.class_attr = attr
if self.class_attr is not None:
self.attr_dims = int(self.class_attr.get_shape()[1])
# Save the arguments passed from the main script
self.opt = opt
self.num_tasks = num_tasks
self.imp_method = imp_method
self.fisher_update_after = fisher_update_after
self.fisher_ema_decay = fisher_ema_decay
self.network_arch = network_arch
# A scalar variable for previous synapse strength
self.synap_stgth = tf.constant(synap_stgth, shape=[1], dtype=tf.float32)
self.triplet_loss_scale = 2.1
# Define different variables
self.weights_old = []
self.star_vars = []
self.small_omega_vars = []
self.big_omega_vars = []
self.big_omega_riemann_vars = []
self.fisher_diagonal_at_minima = []
self.hebbian_score_vars = []
self.running_fisher_vars = []
self.tmp_fisher_vars = []
self.max_fisher_vars = []
self.min_fisher_vars = []
self.max_score_vars = []
self.min_score_vars = []
self.normalized_score_vars = []
self.score_vars = []
self.normalized_fisher_at_minima_vars = []
self.weights_delta_old_vars = []
self.ref_grads = []
self.projected_gradients_list = []
if self.class_attr is not None:
self.loss_and_train_ops_for_attr_vector(x, self.y_)
else:
self.loss_and_train_ops_for_one_hot_vector(x, self.y_)
# Set the operations to reset the optimizer when needed
self.reset_optimizer_ops()
####################################################################################
#### Internal APIs of the class. These should not be called/ exposed externally ####
####################################################################################
def loss_and_train_ops_for_one_hot_vector(self, x, y_):
"""
Loss and training operations for the training of one-hot vector based classification model
"""
# Define the appropriate network
if self.network_arch == 'FC-S':
input_dim = int(x.get_shape()[1])
layer_dims = [input_dim, 256, 256, self.total_classes]
if self.imp_method == 'PNN':
self.task_logits = []
self.task_pruned_logits = []
self.unweighted_entropy = []
for i in range(self.num_tasks):
if i == 0:
self.task_logits.append(self.init_fc_column_progNN(layer_dims, x))
self.task_pruned_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(self.task_logits[i])[0], 1]), self.task_logits[i], NEG_INF*tf.ones_like(self.task_logits[i])))
self.unweighted_entropy.append(tf.squeeze(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_[i], logits=self.task_pruned_logits[i])))) # mult by mean(y_[i]) puts unwaranted loss to 0
else:
self.task_logits.append(self.extensible_fc_column_progNN(layer_dims, x, i))
self.task_pruned_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(self.task_logits[i])[0], 1]), self.task_logits[i], NEG_INF*tf.ones_like(self.task_logits[i])))
self.unweighted_entropy.append(tf.squeeze(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_[i], logits=self.task_pruned_logits[i])))) # mult by mean(y_[i]) puts unwaranted loss to 0
else:
self.fc_variables(layer_dims)
logits = self.fc_feedforward(x, self.weights, self.biases)
elif self.network_arch == 'FC-B':
input_dim = int(x.get_shape()[1])
layer_dims = [input_dim, 2000, 2000, self.total_classes]
self.fc_variables(layer_dims)
logits = self.fc_feedforward(x, self.weights, self.biases)
elif self.network_arch == 'CNN':
num_channels = int(x.get_shape()[-1])
self.image_size = int(x.get_shape()[1])
kernels = [3, 3, 3, 3, 3]
depth = [num_channels, 32, 32, 64, 64, 512]
self.conv_variables(kernels, depth)
logits = self.conv_feedforward(x, self.weights, self.biases, apply_dropout=True)
elif self.network_arch == 'VGG':
# VGG-16
logits = self.vgg_16_conv_feedforward(x)
elif 'RESNET-' in self.network_arch:
if self.network_arch == 'RESNET-S':
# Same resnet-18 as used in GEM paper
kernels = [3, 3, 3, 3, 3]
filters = [20, 20, 40, 80, 160]
strides = [1, 0, 2, 2, 2]
elif self.network_arch == 'RESNET-B':
# Standard ResNet-18
kernels = [7, 3, 3, 3, 3]
filters = [64, 64, 128, 256, 512]
strides = [2, 0, 2, 2, 2]
if self.imp_method == 'PNN':
self.task_logits = []
self.task_pruned_logits = []
self.unweighted_entropy = []
for i in range(self.num_tasks):
if i == 0:
self.task_logits.append(self.init_resent_column_progNN(x, kernels, filters, strides))
else:
self.task_logits.append(self.extensible_resnet_column_progNN(x, kernels, filters, strides, i))
self.task_pruned_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(self.task_logits[i])[0], 1]), self.task_logits[i], NEG_INF*tf.ones_like(self.task_logits[i])))
self.unweighted_entropy.append(tf.squeeze(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_[i], logits=self.task_pruned_logits[i]))))
elif self.imp_method == 'A-GEM' or self.imp_method == 'ER':
logits = self.resnet18_conv_feedforward(x, kernels, filters, strides)
self.task_pruned_logits = []
self.unweighted_entropy = []
for i in range(self.num_tasks):
self.task_pruned_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(logits)[0], 1]), logits, NEG_INF*tf.ones_like(logits)))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=self.task_pruned_logits[i])
adjusted_entropy = tf.reduce_sum(tf.cast(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(y_)[0], 1]), dtype=tf.float32) * y_, axis=1) * cross_entropy
self.unweighted_entropy.append(tf.reduce_sum(adjusted_entropy)) # We will average it later on
else:
logits = self.resnet18_conv_feedforward(x, kernels, filters, strides)
# Prune the predictions to only include the classes for which
# the training data is present
if (self.imp_method != 'PNN') and ((self.imp_method != 'A-GEM' and self.imp_method != 'ER') or 'FC-' in self.network_arch):
self.pruned_logits = tf.where(tf.tile(tf.equal(self.output_mask[None,:], 1.0), [tf.shape(logits)[0], 1]), logits, NEG_INF*tf.ones_like(logits))
# Create list of variables for storing different measures
# Note: This method has to be called before calculating fisher
# or any other importance measure
self.init_vars()
# Different entropy measures/ loss definitions
if (self.imp_method != 'PNN') and ((self.imp_method != 'A-GEM' and self.imp_method != 'ER') or 'FC-' in self.network_arch):
self.mse = 2.0*tf.nn.l2_loss(self.pruned_logits) # tf.nn.l2_loss computes sum(T**2)/ 2
self.weighted_entropy = tf.reduce_mean(tf.losses.softmax_cross_entropy(y_,
self.pruned_logits, self.sample_weights, reduction=tf.losses.Reduction.NONE))
self.unweighted_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_,
logits=self.pruned_logits))
# Create operations for loss and gradient calculation
self.loss_and_gradients(self.imp_method)
if self.imp_method != 'PNN':
# Store the current weights before doing a train step
self.get_current_weights()
# For GEM variants train ops will be defined later
if 'GEM' not in self.imp_method:
# Define the training operation here as Pathint ops depend on the train ops
self.train_op()
# Create operations to compute importance depending on the importance methods
if self.imp_method == 'EWC':
self.create_fisher_ops()
elif self.imp_method == 'M-EWC':
self.create_fisher_ops()
self.create_pathint_ops()
self.combined_fisher_pathint_ops()
elif self.imp_method == 'PI':
self.create_pathint_ops()
elif self.imp_method == 'RWALK':
self.create_fisher_ops()
self.create_pathint_ops()
elif self.imp_method == 'MAS':
self.create_hebbian_ops()
elif self.imp_method == 'A-GEM' or self.imp_method == 'S-GEM':
self.create_stochastic_gem_ops()
if self.imp_method != 'PNN':
# Create weight save and store ops
self.weights_store_ops()
# Summary operations for visualization
tf.summary.scalar("unweighted_entropy", self.unweighted_entropy)
for v in self.trainable_vars:
tf.summary.histogram(v.name.replace(":", "_"), v)
self.merged_summary = tf.summary.merge_all()
# Accuracy measure
if (self.imp_method == 'PNN') or ((self.imp_method == 'A-GEM' or self.imp_method == 'ER') and 'FC-' not in self.network_arch):
self.correct_predictions = []
self.accuracy = []
for i in range(self.num_tasks):
if self.imp_method == 'PNN':
self.correct_predictions.append(tf.equal(tf.argmax(self.task_pruned_logits[i], 1), tf.argmax(y_[i], 1)))
else:
self.correct_predictions.append(tf.equal(tf.argmax(self.task_pruned_logits[i], 1), tf.argmax(y_, 1)))
self.accuracy.append(tf.reduce_mean(tf.cast(self.correct_predictions[i], tf.float32)))
else:
self.correct_predictions = tf.equal(tf.argmax(self.pruned_logits, 1), tf.argmax(y_, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_predictions, tf.float32))
def loss_and_train_ops_for_attr_vector(self, x, y_):
"""
Loss and training operations for the training of joined embedding model
"""
# Define the appropriate network
if self.network_arch == 'FC-S':
input_dim = int(x.get_shape()[1])
layer_dims = [input_dim, 256, 256, self.total_classes]
self.fc_variables(layer_dims)
logits = self.fc_feedforward(x, self.weights, self.biases)
elif self.network_arch == 'FC-B':
input_dim = int(x.get_shape()[1])
layer_dims = [input_dim, 2000, 2000, self.total_classes]
self.fc_variables(layer_dims)
logits = self.fc_feedforward(x, self.weights, self.biases)
elif self.network_arch == 'CNN':
num_channels = int(x.get_shape()[-1])
self.image_size = int(x.get_shape()[1])
kernels = [3, 3, 3, 3, 3]
depth = [num_channels, 32, 32, 64, 64, 512]
self.conv_variables(kernels, depth)
logits = self.conv_feedforward(x, self.weights, self.biases, apply_dropout=True)
elif self.network_arch == 'VGG':
# VGG-16
phi_x = self.vgg_16_conv_feedforward(x)
elif self.network_arch == 'RESNET-S':
# Smaller ResNet-18 (same as used in the GEM paper)
kernels = [3, 3, 3, 3, 3]
filters = [20, 20, 40, 80, 160]
strides = [1, 0, 2, 2, 2]
# Get the image features
phi_x = self.resnet18_conv_feedforward(x, kernels, filters, strides)
elif self.network_arch == 'RESNET-B':
# Standard ResNet-18
kernels = [7, 3, 3, 3, 3]
filters = [64, 64, 128, 256, 512]
strides = [2, 0, 2, 2, 2]
# Get the image features
phi_x = self.resnet18_conv_feedforward(x, kernels, filters, strides)
# Get the attributes embedding
attr_embed = self.get_attribute_embedding(self.class_attr) # Does not contain biases yet, Dimension: TOTAL_CLASSES x image_feature_dim
# Add the biases now
last_layer_biases = bias_variable([self.total_classes], name='attr_embed_b')
self.trainable_vars.append(last_layer_biases)
# Now that we have all the trainable variables, initialize the different book keeping variables
# Note: This method has to be called before calculating fisher
# or any other importance measure
self.init_vars()
# Compute the logits for the ZST case
zst_logits = tf.matmul(phi_x, tf.transpose(attr_embed)) + last_layer_biases
# Prune the predictions to only include the classes for which
# the training data is present
if self.imp_method == 'A-GEM':
pruned_zst_logits = []
self.unweighted_entropy = []
for i in range(self.num_tasks):
pruned_zst_logits.append(tf.where(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(zst_logits)[0], 1]), zst_logits, NEG_INF*tf.ones_like(zst_logits)))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=pruned_zst_logits[i])
adjusted_entropy = tf.reduce_sum(tf.cast(tf.tile(tf.equal(self.output_mask[i][None,:], 1.0), [tf.shape(y_)[0], 1]), dtype=tf.float32) * y_, axis=1) * cross_entropy
self.unweighted_entropy.append(tf.reduce_sum(adjusted_entropy))
else:
pruned_zst_logits = tf.where(tf.tile(tf.equal(self.output_mask[None,:], 1.0),
[tf.shape(zst_logits)[0], 1]), zst_logits, NEG_INF*tf.ones_like(zst_logits))
self.unweighted_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=pruned_zst_logits))
self.mse = 2.0*tf.nn.l2_loss(pruned_zst_logits) # tf.nn.l2_loss computes sum(T**2)/ 2
# Create operations for loss and gradient calculation
self.loss_and_gradients(self.imp_method)
# Store the current weights before doing a train step
self.get_current_weights()
if 'GEM' not in self.imp_method:
self.train_op()
# Create operations to compute importance depending on the importance methods
if self.imp_method == 'EWC':
self.create_fisher_ops()
elif self.imp_method == 'M-EWC':
self.create_fisher_ops()
self.create_pathint_ops()
self.combined_fisher_pathint_ops()
elif self.imp_method == 'PI':
self.create_pathint_ops()
elif self.imp_method == 'RWALK':
self.create_fisher_ops()
self.create_pathint_ops()
elif self.imp_method == 'MAS':
self.create_hebbian_ops()
elif (self.imp_method == 'A-GEM') or (self.imp_method == 'S-GEM'):
self.create_stochastic_gem_ops()
# Create weight save and store ops
self.weights_store_ops()
# Summary operations for visualization
tf.summary.scalar("triplet_loss", self.unweighted_entropy)
for v in self.trainable_vars:
tf.summary.histogram(v.name.replace(":", "_"), v)
self.merged_summary = tf.summary.merge_all()
# Accuracy measure
if self.imp_method == 'A-GEM' and 'FC-' not in self.network_arch:
self.correct_predictions = []
self.accuracy = []
for i in range(self.num_tasks):
self.correct_predictions.append(tf.equal(tf.argmax(pruned_zst_logits[i], 1), tf.argmax(y_, 1)))
self.accuracy.append(tf.reduce_mean(tf.cast(self.correct_predictions[i], tf.float32)))
else:
self.correct_predictions = tf.equal(tf.argmax(pruned_zst_logits, 1), tf.argmax(y_, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_predictions, tf.float32))
def init_fc_column_progNN(self, layer_dims, h, apply_dropout=False):
"""
Defines the first column of Progressive NN - FC Networks
"""
self.trainable_vars = []
self.h_pnn = []
self.trainable_vars.append([])
self.h_pnn.append([])
self.h_pnn[0].append(h)
for i in range(len(layer_dims)-1):
w = weight_variable([layer_dims[i], layer_dims[i+1]], name='fc_w_%d_t0'%(i))
b = bias_variable([layer_dims[i+1]], name='fc_b_%d_t0'%(i))
self.trainable_vars[0].append(w)
self.trainable_vars[0].append(b)
if i == len(layer_dims) - 2:
# Last layer (logits) - don't apply the relu
h = create_fc_layer(h, w, b, apply_relu=False)
else:
h = create_fc_layer(h, w, b)
if apply_dropout:
h = tf.nn.dropout(h, 1)
self.h_pnn[0].append(h)
return h
def extensible_fc_column_progNN(self, layer_dims, h, task, apply_dropout=False):
"""
Define the subsequent columns of the progressive NN - FC Networks
"""
self.trainable_vars.append([])
self.h_pnn.append([])
self.h_pnn[task].append(h)
for i in range(len(layer_dims)-1):
w = weight_variable([layer_dims[i], layer_dims[i+1]], name='fc_w_%d_t%d'%(i, task))
b = bias_variable([layer_dims[i+1]], name='fc_b_%d_t%d'%(i, task))
self.trainable_vars[task].append(w)
self.trainable_vars[task].append(b)
preactivation = create_fc_layer(h, w, b, apply_relu=False)
for tt in range(task):
U_w = weight_variable([layer_dims[i], layer_dims[i+1]], name='fc_uw_%d_t%d_tt%d'%(i, task, tt))
U_b = bias_variable([layer_dims[i+1]], name='fc_ub_%d_t%d_tt%d'%(i, task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
preactivation += create_fc_layer(self.h_pnn[tt][i], U_w, U_b, apply_relu=False)
if i == len(layer_dims) - 2:
# Last layer (logits) - don't apply the relu
h = preactivation
else:
# layer < last layer, apply relu
h = tf.nn.relu(preactivation)
if apply_dropout:
h = tf.nn.dropout(h, 1)
self.h_pnn[task].append(h)
return h
def init_resent_column_progNN(self, x, kernels, filters, strides):
"""
Defines the first column of Progressive NN - ResNet-18
"""
self.trainable_vars = []
self.h_pnn = []
self.trainable_vars.append([])
self.h_pnn.append([])
self.h_pnn[0].append(x)
# Conv1
h = _conv(x, kernels[0], filters[0], strides[0], self.trainable_vars[0], name='conv_1_t0')
h = _bn(h, self.trainable_vars[0], self.train_phase[0], name='bn_1_t0')
h = tf.nn.relu(h)
self.h_pnn[0].append(h)
# Conv2_x
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv2_1_t0')
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv2_2_t0')
self.h_pnn[0].append(h)
# Conv3_x
h = _residual_block_first(h, filters[2], strides[2], self.trainable_vars[0], self.train_phase[0], name='conv3_1_t0', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv3_2_t0')
self.h_pnn[0].append(h)
# Conv4_x
h = _residual_block_first(h, filters[3], strides[3], self.trainable_vars[0], self.train_phase[0], name='conv4_1_t0', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv4_2_t0')
self.h_pnn[0].append(h)
# Conv5_x
h = _residual_block_first(h, filters[4], strides[4], self.trainable_vars[0], self.train_phase[0], name='conv5_1_t0', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[0], self.train_phase[0], name='conv5_2_t0')
self.h_pnn[0].append(h)
# Apply average pooling
h = tf.reduce_mean(h, [1, 2])
if self.network_arch == 'RESNET-S':
logits = _fc(h, self.total_classes, self.trainable_vars[0], name='fc_1_t0', is_cifar=True)
else:
logits = _fc(h, self.total_classes, self.trainable_vars[0], name='fc_1_t0')
self.h_pnn[0].append(logits)
return logits
def extensible_resnet_column_progNN(self, x, kernels, filters, strides, task):
"""
Define the subsequent columns of the progressive NN - ResNet-18
"""
self.trainable_vars.append([])
self.h_pnn.append([])
self.h_pnn[task].append(x)
# Conv1
h = _conv(x, kernels[0], filters[0], strides[0], self.trainable_vars[task], name='conv_1_t%d'%(task))
h = _bn(h, self.trainable_vars[task], self.train_phase[task], name='bn_1_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][0].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_1_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_1_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][0], U_w, U_b, apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Conv2_x
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], name='conv2_1_t%d'%(task))
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], apply_relu=False, name='conv2_2_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][1].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_2_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_2_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][1], U_w, U_b, apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Conv3_x
h = _residual_block_first(h, filters[2], strides[2], self.trainable_vars[task], self.train_phase[task], name='conv3_1_t%d'%(task), is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], apply_relu=False, name='conv3_2_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][2].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_3_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_3_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][2], U_w, U_b, stride=strides[2], apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Conv4_x
h = _residual_block_first(h, filters[3], strides[3], self.trainable_vars[task], self.train_phase[task], name='conv4_1_t%d'%(task), is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], apply_relu=False, name='conv4_2_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][3].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_4_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_4_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][3], U_w, U_b, stride=strides[3], apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Conv5_x
h = _residual_block_first(h, filters[4], strides[4], self.trainable_vars[task], self.train_phase[task], name='conv5_1_t%d'%(task), is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars[task], self.train_phase[task], apply_relu=False, name='conv5_2_t%d'%(task))
# Add lateral connections
for tt in range(task):
U_w = weight_variable([1, 1, self.h_pnn[tt][4].get_shape().as_list()[-1], h.get_shape().as_list()[-1]], name='conv_5_w_t%d_tt%d'%(task, tt))
U_b = bias_variable([h.get_shape().as_list()[-1]], name='conv_5_b_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
h += create_conv_layer(self.h_pnn[tt][4], U_w, U_b, stride=strides[4], apply_relu=False)
h = tf.nn.relu(h)
self.h_pnn[task].append(h)
# Apply average pooling
h = tf.reduce_mean(h, [1, 2])
if self.network_arch == 'RESNET-S':
logits = _fc(h, self.total_classes, self.trainable_vars[task], name='fc_1_t%d'%(task), is_cifar=True)
else:
logits = _fc(h, self.total_classes, self.trainable_vars[task], name='fc_1_t%d'%(task))
for tt in range(task):
h_tt = tf.reduce_mean(self.h_pnn[tt][5], [1, 2])
U_w = weight_variable([h_tt.get_shape().as_list()[1], self.total_classes], name='fc_uw_1_t%d_tt%d'%(task, tt))
U_b = bias_variable([self.total_classes], name='fc_ub_1_t%d_tt%d'%(task, tt))
self.trainable_vars[task].append(U_w)
self.trainable_vars[task].append(U_b)
logits += create_fc_layer(h_tt, U_w, U_b, apply_relu=False)
self.h_pnn[task].append(logits)
return logits
def fc_variables(self, layer_dims):
"""
Defines variables for a 3-layer fc network
Args:
Returns:
"""
self.weights = []
self.biases = []
self.trainable_vars = []
for i in range(len(layer_dims)-1):
w = weight_variable([layer_dims[i], layer_dims[i+1]], name='fc_%d'%(i))
b = bias_variable([layer_dims[i+1]], name='fc_%d'%(i))
self.weights.append(w)
self.biases.append(b)
self.trainable_vars.append(w)
self.trainable_vars.append(b)
def fc_feedforward(self, h, weights, biases, apply_dropout=False):
"""
Forward pass through a fc network
Args:
h Input image (tensor)
weights List of weights for a fc network
biases List of biases for a fc network
apply_dropout Whether to apply dropout (True/False)
Returns:
Logits of a fc network
"""
if apply_dropout:
h = tf.nn.dropout(h, 1) # Apply dropout on Input?
for (w, b) in list(zip(weights, biases))[:-1]:
h = create_fc_layer(h, w, b)
if apply_dropout:
h = tf.nn.dropout(h, 1) # Apply dropout on hidden layers?
# Store image features
self.features = h
self.image_feature_dim = h.get_shape().as_list()[-1]
return create_fc_layer(h, weights[-1], biases[-1], apply_relu=False)
def conv_variables(self, kernel, depth):
"""
Defines variables of a 5xconv-1xFC convolutional network
Args:
Returns:
"""
self.weights = []
self.biases = []
self.trainable_vars = []
div_factor = 1
for i in range(len(kernel)):
w = weight_variable([kernel[i], kernel[i], depth[i], depth[i+1]], name='conv_%d'%(i))
b = bias_variable([depth[i+1]], name='conv_%d'%(i))
self.weights.append(w)
self.biases.append(b)
self.trainable_vars.append(w)
self.trainable_vars.append(b)
# Since we maxpool after every two conv layers
if ((i+1) % 2 == 0):
div_factor *= 2
flat_units = (self.image_size // div_factor) * (self.image_size // div_factor) * depth[-1]
w = weight_variable([flat_units, self.total_classes], name='fc_%d'%(i))
b = bias_variable([self.total_classes], name='fc_%d'%(i))
self.weights.append(w)
self.biases.append(b)
self.trainable_vars.append(w)
self.trainable_vars.append(b)
def conv_feedforward(self, h, weights, biases, apply_dropout=True):
"""
Forward pass through a convolutional network
Args:
h Input image (tensor)
weights List of weights for a conv network
biases List of biases for a conv network
apply_dropout Whether to apply dropout (True/False)
Returns:
Logits of a conv network
"""
for i, (w, b) in enumerate(list(zip(weights, biases))[:-1]):
# Apply conv operation till the second last layer, which is a FC layer
h = create_conv_layer(h, w, b)
if ((i+1) % 2 == 0):
# Apply max pool after every two conv layers
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Apply dropout
if apply_dropout:
h = tf.nn.dropout(h, self.keep_prob)
# Construct FC layers
shape = h.get_shape().as_list()
h = tf.reshape(h, [-1, shape[1] * shape[2] * shape[3]])
# Store image features
self.features = h
self.image_feature_dim = h.get_shape().as_list()[-1]
return create_fc_layer(h, weights[-1], biases[-1], apply_relu=False)
def vgg_16_conv_feedforward(self, h):
"""
Forward pass through a VGG 16 network
Return:
Logits of a VGG 16 network
"""
self.trainable_vars = []
# Conv1
h = vgg_conv_layer(h, 3, 64, 1, self.trainable_vars, name='conv1_1')
h = vgg_conv_layer(h, 3, 64, 1, self.trainable_vars, name='conv1_2')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
# Conv2
h = vgg_conv_layer(h, 3, 128, 1, self.trainable_vars, name='conv2_1')
h = vgg_conv_layer(h, 3, 128, 1, self.trainable_vars, name='conv2_2')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# Conv3
h = vgg_conv_layer(h, 3, 256, 1, self.trainable_vars, name='conv3_1')
h = vgg_conv_layer(h, 3, 256, 1, self.trainable_vars, name='conv3_2')
h = vgg_conv_layer(h, 3, 256, 1, self.trainable_vars, name='conv3_3')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')
# Conv4
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv4_1')
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv4_2')
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv4_3')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4')
# Conv5
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv5_1')
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv5_2')
h = vgg_conv_layer(h, 3, 512, 1, self.trainable_vars, name='conv5_3')
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool5')
# FC layers
shape = h.get_shape().as_list()
h = tf.reshape(h, [-1, shape[1] * shape[2] * shape[3]])
# fc6
h = vgg_fc_layer(h, 4096, self.trainable_vars, apply_relu=True, name='fc6')
# fc7
h = vgg_fc_layer(h, 4096, self.trainable_vars, apply_relu=True, name='fc7')
# Store image features
self.features = h
self.image_feature_dim = h.get_shape().as_list()[-1]
# fc8
if self.class_attr is not None:
# Return the image features
return h
else:
logits = vgg_fc_layer(h, self.total_classes, self.trainable_vars, apply_relu=False, name='fc8')
return logits
def resnet18_conv_feedforward(self, h, kernels, filters, strides):
"""
Forward pass through a ResNet-18 network
Returns:
Logits of a resnet-18 conv network
"""
self.trainable_vars = []
# Conv1
h = _conv(h, kernels[0], filters[0], strides[0], self.trainable_vars, name='conv_1')
h = _bn(h, self.trainable_vars, self.train_phase, name='bn_1')
h = tf.nn.relu(h)
# Conv2_x
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv2_1')
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv2_2')
# Conv3_x
h = _residual_block_first(h, filters[2], strides[2], self.trainable_vars, self.train_phase, name='conv3_1', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv3_2')
# Conv4_x
h = _residual_block_first(h, filters[3], strides[3], self.trainable_vars, self.train_phase, name='conv4_1', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv4_2')
# Conv5_x
h = _residual_block_first(h, filters[4], strides[4], self.trainable_vars, self.train_phase, name='conv5_1', is_ATT_DATASET=self.is_ATT_DATASET)
h = _residual_block(h, self.trainable_vars, self.train_phase, name='conv5_2')
# Apply average pooling
h = tf.reduce_mean(h, [1, 2])
# Store the feature mappings
self.features = h
self.image_feature_dim = h.get_shape().as_list()[-1]
if self.class_attr is not None:
# Return the image features
return h
else:
if self.network_arch == 'RESNET-S':
logits = _fc(h, self.total_classes, self.trainable_vars, name='fc_1', is_cifar=True)
else:
logits = _fc(h, self.total_classes, self.trainable_vars, name='fc_1')
return logits
def get_attribute_embedding(self, attr):
"""
Get attribute embedding using a simple FC network
Returns:
Embedding vector of k x ATTR_DIMS
"""
w = weight_variable([self.attr_dims, self.image_feature_dim], name='attr_embed_w')
self.trainable_vars.append(w)
# Return the product of the attribute matrix and the embedding weight matrix.
return tf.matmul(attr, w) # Dimension should be TOTAL_CLASSES x image_feature_dim
def loss_and_gradients(self, imp_method):
"""
Defines task based and surrogate losses and their
gradients
Args:
Returns:
"""
reg = 0.0
if imp_method == 'VAN' or imp_method == 'PNN' or imp_method == 'ER' or 'GEM' in imp_method:
pass
elif imp_method == 'EWC' or imp_method == 'M-EWC':
reg = tf.add_n([tf.reduce_sum(tf.square(w - w_star) * f) for w, w_star,
f in zip(self.trainable_vars, self.star_vars, self.normalized_fisher_at_minima_vars)])
elif imp_method == 'PI':
reg = tf.add_n([tf.reduce_sum(tf.square(w - w_star) * f) for w, w_star,
f in zip(self.trainable_vars, self.star_vars, self.big_omega_vars)])
elif imp_method == 'MAS':
reg = tf.add_n([tf.reduce_sum(tf.square(w - w_star) * f) for w, w_star,
f in zip(self.trainable_vars, self.star_vars, self.hebbian_score_vars)])
elif imp_method == 'RWALK':
reg = tf.add_n([tf.reduce_sum(tf.square(w - w_star) * (f + scr)) for w, w_star,
f, scr in zip(self.trainable_vars, self.star_vars, self.normalized_fisher_at_minima_vars,
self.normalized_score_vars)])
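# Summary: all of the surrogate losses above share the quadratic form
#   reg = sum_k importance_k * (w_k - w_k_star)^2
# and differ only in how the per-parameter importance is estimated (normalized
# Fisher for EWC/M-EWC, accumulated path integral for PI, Hebbian scores for MAS,
# and normalized Fisher plus normalized path-based scores for RWALK).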
"""
# ***** DON't USE THIS WITH MULTI-HEAD SETTING SINCE THIS WILL UPDATE ALL THE WEIGHTS *****
# If CNN arch, then use the weight decay
if self.is_ATT_DATASET:
self.unweighted_entropy += tf.add_n([0.0005 * tf.nn.l2_loss(v) for v in self.trainable_vars if 'weights' in v.name or 'kernel' in v.name])
"""
if imp_method == 'PNN':
# Compute the gradients of regularized loss
self.reg_gradients_vars = []
for i in range(self.num_tasks):
self.reg_gradients_vars.append([])
self.reg_gradients_vars[i] = self.opt.compute_gradients(self.unweighted_entropy[i], var_list=self.trainable_vars[i])
elif imp_method != 'A-GEM': # For A-GEM we will define the losses and gradients later on
if imp_method == 'ER' and 'FC-' not in self.network_arch:
self.reg_loss = tf.add_n([self.unweighted_entropy[i] for i in range(self.num_tasks)])/ self.mem_batch_size
else:
# Regularized training loss
self.reg_loss = tf.squeeze(self.unweighted_entropy + self.synap_stgth * reg)
# Compute the gradients of the vanilla loss
self.vanilla_gradients_vars = self.opt.compute_gradients(self.unweighted_entropy,
var_list=self.trainable_vars)
# Compute the gradients of regularized loss
self.reg_gradients_vars = self.opt.compute_gradients(self.reg_loss,
var_list=self.trainable_vars)
def train_op(self):
"""
Defines the training operation (a single step during training)
Args:
Returns:
"""
if self.imp_method == 'VAN' or self.imp_method == 'ER':
# Define training operation
self.train = self.opt.apply_gradients(self.reg_gradients_vars)
elif self.imp_method == 'PNN':
# Define training operation
self.train = [self.opt.apply_gradients(self.reg_gradients_vars[i]) for i in range(self.num_tasks)]
elif self.imp_method == 'FTR_EXT':
# Define a training operation for the first and subsequent tasks
self.train = self.opt.apply_gradients(self.reg_gradients_vars)
self.train_classifier = self.opt.apply_gradients(self.reg_gradients_vars[-2:])
else:
# Get the value of old weights first
with tf.control_dependencies([self.weights_old_ops_grouped]):
# Define a training operation
self.train = self.opt.apply_gradients(self.reg_gradients_vars)
def init_vars(self):
"""
Defines different variables that will be used for the
weight consolidation
Args:
Returns:
"""
if self.imp_method == 'PNN':
return
for v in range(len(self.trainable_vars)):
# List of variables for weight updates
self.weights_old.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.weights_delta_old_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.star_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False,
name=self.trainable_vars[v].name.rsplit(':')[0]+'_star'))
# List of variables for pathint method
self.small_omega_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.big_omega_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.big_omega_riemann_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
# List of variables to store fisher information
self.fisher_diagonal_at_minima.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.normalized_fisher_at_minima_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False, dtype=tf.float32))
self.tmp_fisher_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.running_fisher_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.score_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
# New variables for conv setting for fisher and score normalization
self.max_fisher_vars.append(tf.Variable(tf.zeros(1), dtype=tf.float32, trainable=False))
self.min_fisher_vars.append(tf.Variable(tf.zeros(1), dtype=tf.float32, trainable=False))
self.max_score_vars.append(tf.Variable(tf.zeros(1), dtype=tf.float32, trainable=False))
self.min_score_vars.append(tf.Variable(tf.zeros(1), dtype=tf.float32, trainable=False))
self.normalized_score_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
if self.imp_method == 'MAS':
# List of variables to store hebbian information
self.hebbian_score_vars.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
elif self.imp_method == 'A-GEM' or self.imp_method == 'S-GEM':
self.ref_grads.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
self.projected_gradients_list.append(tf.Variable(tf.zeros(self.trainable_vars[v].get_shape()), trainable=False))
def get_current_weights(self):
"""
Get the values of current weights
Note: These weights are different from star_vars as those
store the weights after training for the last task.
Args:
Returns:
"""
weights_old_ops = []
weights_delta_old_ops = []
for v in range(len(self.trainable_vars)):
weights_old_ops.append(tf.assign(self.weights_old[v], self.trainable_vars[v]))
weights_delta_old_ops.append(tf.assign(self.weights_delta_old_vars[v], self.trainable_vars[v]))
self.weights_old_ops_grouped = tf.group(*weights_old_ops)
self.weights_delta_old_grouped = tf.group(*weights_delta_old_ops)
def weights_store_ops(self):
"""
Defines weight restoration operations
Args:
Returns:
"""
restore_weights_ops = []
set_star_vars_ops = []
for v in range(len(self.trainable_vars)):
restore_weights_ops.append(tf.assign(self.trainable_vars[v], self.star_vars[v]))
set_star_vars_ops.append(tf.assign(self.star_vars[v], self.trainable_vars[v]))
self.restore_weights = tf.group(*restore_weights_ops)
self.set_star_vars = tf.group(*set_star_vars_ops)
def reset_optimizer_ops(self):
"""
Defines operations to reset the optimizer
Args:
Returns:
"""
# Set the operation for resetting the optimizer
self.optimizer_slots = [self.opt.get_slot(var, name) for name in self.opt.get_slot_names()\
for var in tf.global_variables() if self.opt.get_slot(var, name) is not None]
self.slot_names = self.opt.get_slot_names()
self.opt_init_op = tf.variables_initializer(self.optimizer_slots)
def create_pathint_ops(self):
"""
Defines operations for path integral-based importance
Args:
Returns:
"""
reset_small_omega_ops = []
update_small_omega_ops = []
update_big_omega_ops = []
update_big_omega_riemann_ops = []
for v in range(len(self.trainable_vars)):
# Make sure that the variables are updated before calculating delta(theta)
with tf.control_dependencies([self.train]):
update_small_omega_ops.append(tf.assign_add(self.small_omega_vars[v],
-(self.vanilla_gradients_vars[v][0] * (self.trainable_vars[v] - self.weights_old[v]))))
# Ops to reset the small omega
reset_small_omega_ops.append(tf.assign(self.small_omega_vars[v], self.small_omega_vars[v]*0.0))
if self.imp_method == 'PI':
                # Update the big omegas at the end of the task using the Euclidean distance
update_big_omega_ops.append(tf.assign_add(self.big_omega_vars[v],
tf.nn.relu(tf.div(self.small_omega_vars[v], (PARAM_XI_STEP + tf.square(self.trainable_vars[v] - self.star_vars[v]))))))
elif self.imp_method == 'RWALK':
# Update the big omegas after small intervals using distance in riemannian manifold (KL-divergence)
update_big_omega_riemann_ops.append(tf.assign_add(self.big_omega_riemann_vars[v],
tf.nn.relu(tf.div(self.small_omega_vars[v],
(PARAM_XI_STEP + self.running_fisher_vars[v] * tf.square(self.trainable_vars[v] - self.weights_delta_old_vars[v]))))))
self.update_small_omega = tf.group(*update_small_omega_ops)
self.reset_small_omega = tf.group(*reset_small_omega_ops)
if self.imp_method == 'PI':
self.update_big_omega = tf.group(*update_big_omega_ops)
elif self.imp_method == 'RWALK':
self.update_big_omega_riemann = tf.group(*update_big_omega_riemann_ops)
self.big_omega_riemann_reset = [tf.assign(tensor, tf.zeros_like(tensor)) for tensor in self.big_omega_riemann_vars]
if self.imp_method == 'RWALK':
# For the first task, scale the scores so that division does not have an effect
self.scale_score = [tf.assign(s, s*2.0) for s in self.big_omega_riemann_vars]
# To reduce the rigidity after each task the importance scores are averaged
self.update_score = [tf.assign_add(scr, tf.div(tf.add(scr, riemm_omega), 2.0))
for scr, riemm_omega in zip(self.score_vars, self.big_omega_riemann_vars)]
# Get the min and max in each layer of the scores
self.get_max_score_vars = [tf.assign(var, tf.expand_dims(tf.squeeze(tf.reduce_max(scr, keepdims=True)),
axis=0)) for var, scr in zip(self.max_score_vars, self.score_vars)]
self.get_min_score_vars = [tf.assign(var, tf.expand_dims(tf.squeeze(tf.reduce_min(scr, keepdims=True)),
axis=0)) for var, scr in zip(self.min_score_vars, self.score_vars)]
self.max_score = tf.reduce_max(tf.convert_to_tensor(self.max_score_vars))
self.min_score = tf.reduce_min(tf.convert_to_tensor(self.min_score_vars))
with tf.control_dependencies([self.max_score, self.min_score]):
self.normalize_scores = [tf.assign(tgt, (var - self.min_score)/ (self.max_score - self.min_score + EPSILON))
for tgt, var in zip(self.normalized_score_vars, self.score_vars)]
# Sparsify all the layers except last layer
sparsify_score_ops = []
for v in range(len(self.normalized_score_vars) - 2):
sparsify_score_ops.append(tf.assign(self.normalized_score_vars[v],
tf.nn.dropout(self.normalized_score_vars[v], self.keep_prob)))
self.sparsify_scores = tf.group(*sparsify_score_ops)
def create_fisher_ops(self):
"""
Defines the operations to compute online update of Fisher
Args:
Returns:
"""
ders = tf.gradients(self.unweighted_entropy, self.trainable_vars)
fisher_ema_at_step_ops = []
fisher_accumulate_at_step_ops = []
# ops for running fisher
self.set_tmp_fisher = [tf.assign_add(f, tf.square(d)) for f, d in zip(self.tmp_fisher_vars, ders)]
# Initialize the running fisher to non-zero value
self.set_initial_running_fisher = [tf.assign(r_f, s_f) for r_f, s_f in zip(self.running_fisher_vars,
self.tmp_fisher_vars)]
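        # Online estimate of the diagonal Fisher: squared gradients are accumulated in
        # tmp_fisher_vars and mixed into running_fisher_vars below, with the accumulated
        # sum scaled by 1/fisher_update_after before the EMA-style update.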
self.set_running_fisher = [tf.assign(f, (1 - self.fisher_ema_decay) * f + (1.0/ self.fisher_update_after) *
self.fisher_ema_decay * tmp) for f, tmp in zip(self.running_fisher_vars, self.tmp_fisher_vars)]
self.get_fisher_at_minima = [tf.assign(var, f) for var, f in zip(self.fisher_diagonal_at_minima,
self.running_fisher_vars)]
self.reset_tmp_fisher = [tf.assign(tensor, tf.zeros_like(tensor)) for tensor in self.tmp_fisher_vars]
# Get the min and max in each layer of the Fisher
self.get_max_fisher_vars = [tf.assign(var, tf.expand_dims(tf.squeeze(tf.reduce_max(scr, keepdims=True)), axis=0))
for var, scr in zip(self.max_fisher_vars, self.fisher_diagonal_at_minima)]
self.get_min_fisher_vars = [tf.assign(var, tf.expand_dims(tf.squeeze(tf.reduce_min(scr, keepdims=True)), axis=0))
for var, scr in zip(self.min_fisher_vars, self.fisher_diagonal_at_minima)]
self.max_fisher = tf.reduce_max(tf.convert_to_tensor(self.max_fisher_vars))
self.min_fisher = tf.reduce_min(tf.convert_to_tensor(self.min_fisher_vars))
with tf.control_dependencies([self.max_fisher, self.min_fisher]):
self.normalize_fisher_at_minima = [tf.assign(tgt,
(var - self.min_fisher)/ (self.max_fisher - self.min_fisher + EPSILON))
for tgt, var in zip(self.normalized_fisher_at_minima_vars, self.fisher_diagonal_at_minima)]
self.clear_attr_embed_reg = tf.assign(self.normalized_fisher_at_minima_vars[-2], tf.zeros_like(self.normalized_fisher_at_minima_vars[-2]))
# Sparsify all the layers except last layer
sparsify_fisher_ops = []
for v in range(len(self.normalized_fisher_at_minima_vars) - 2):
sparsify_fisher_ops.append(tf.assign(self.normalized_fisher_at_minima_vars[v],
tf.nn.dropout(self.normalized_fisher_at_minima_vars[v], self.keep_prob)))
self.sparsify_fisher = tf.group(*sparsify_fisher_ops)
def combined_fisher_pathint_ops(self):
"""
Define the operations to refine Fisher information based on parameters convergence
Args:
Returns:
"""
#self.refine_fisher_at_minima = [tf.assign(f, f*(1.0/(s+1e-12))) for f, s in zip(self.fisher_diagonal_at_minima, self.small_omega_vars)]
self.refine_fisher_at_minima = [tf.assign(f, f*tf.exp(-100.0*s)) for f, s in zip(self.fisher_diagonal_at_minima, self.small_omega_vars)]
def create_hebbian_ops(self):
"""
Define operations for hebbian measure of importance (MAS)
"""
# Compute the gradients of mse loss
self.mse_gradients = tf.gradients(self.mse, self.trainable_vars)
#with tf.control_dependencies([self.mse_gradients]):
# Keep on adding gradients to the omega
self.accumulate_hebbian_scores = [tf.assign_add(omega, tf.abs(grad)) for omega, grad in zip(self.hebbian_score_vars, self.mse_gradients)]
# Average across the total images
self.average_hebbian_scores = [tf.assign(omega, omega*(1.0/self.train_samples)) for omega in self.hebbian_score_vars]
# Reset the hebbian importance variables
self.reset_hebbian_scores = [tf.assign(omega, tf.zeros_like(omega)) for omega in self.hebbian_score_vars]
def create_stochastic_gem_ops(self):
"""
Define operations for Stochastic GEM
"""
if 'FC-' in self.network_arch or self.imp_method == 'S-GEM':
self.agem_loss = self.unweighted_entropy
else:
self.agem_loss = tf.add_n([self.unweighted_entropy[i] for i in range(self.num_tasks)])/ self.mem_batch_size
ref_grads = tf.gradients(self.agem_loss, self.trainable_vars)
# Reference gradient for previous tasks
self.store_ref_grads = [tf.assign(ref, grad) for ref, grad in zip(self.ref_grads, ref_grads)]
flat_ref_grads = tf.concat([tf.reshape(grad, [-1]) for grad in self.ref_grads], 0)
        # Gradient on the current task
task_grads = tf.gradients(self.agem_loss, self.trainable_vars)
flat_task_grads = tf.concat([tf.reshape(grad, [-1]) for grad in task_grads], 0)
with tf.control_dependencies([flat_task_grads]):
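            # A-GEM projection: when the current-task gradient conflicts with the
            # reference (episodic-memory) gradient, i.e. dotp < 0, replace it with
            # g - (g . g_ref / g_ref . g_ref) * g_ref so that the memory loss does not
            # increase; the tf.cond below keeps the raw gradient whenever dotp >= 0.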
dotp = tf.reduce_sum(tf.multiply(flat_task_grads, flat_ref_grads))
ref_mag = tf.reduce_sum(tf.multiply(flat_ref_grads, flat_ref_grads))
proj = flat_task_grads - ((dotp/ ref_mag) * flat_ref_grads)
self.reset_violation_count = self.violation_count.assign(0)
def increment_violation_count():
with tf.control_dependencies([tf.assign_add(self.violation_count, 1)]):
return tf.identity(self.violation_count)
self.violation_count = tf.cond(tf.greater_equal(dotp, 0), lambda: tf.identity(self.violation_count), increment_violation_count)
projected_gradients = tf.cond(tf.greater_equal(dotp, 0), lambda: tf.identity(flat_task_grads), lambda: tf.identity(proj))
# Convert the flat projected gradient vector into a list
offset = 0
store_proj_grad_ops = []
for v in self.projected_gradients_list:
shape = v.get_shape()
v_params = 1
for dim in shape:
v_params *= dim.value
store_proj_grad_ops.append(tf.assign(v, tf.reshape(projected_gradients[offset:offset+v_params], shape)))
offset += v_params
self.store_proj_grads = tf.group(*store_proj_grad_ops)
# Define training operations for the tasks > 1
with tf.control_dependencies([self.store_proj_grads]):
self.train_subseq_tasks = self.opt.apply_gradients(zip(self.projected_gradients_list, self.trainable_vars))
# Define training operations for the first task
self.first_task_gradients_vars = self.opt.compute_gradients(self.agem_loss, var_list=self.trainable_vars)
self.train_first_task = self.opt.apply_gradients(self.first_task_gradients_vars)
#################################################################################
#### External APIs of the class. These will be called/ exposed externally #######
#################################################################################
def reset_optimizer(self, sess):
"""
Resets the optimizer state
Args:
sess TF session
Returns:
"""
# Call the reset optimizer op
sess.run(self.opt_init_op)
def set_active_outputs(self, sess, labels):
"""
Set the mask for the labels seen so far
Args:
sess TF session
labels Mask labels
Returns:
"""
new_mask = np.zeros(self.total_classes)
new_mask[labels] = 1.0
"""
for l in labels:
new_mask[l] = 1.0
"""
sess.run(self.output_mask.assign(new_mask))
def init_updates(self, sess):
"""
Initialization updates
Args:
sess TF session
Returns:
"""
# Set the star values to the initial weights, so that we can calculate
# big_omegas reliably
if self.imp_method != 'PNN':
sess.run(self.set_star_vars)
def task_updates(self, sess, task, train_x, train_labels, num_classes_per_task=10, class_attr=None, online_cross_val=False):
"""
Updates different variables when a task is completed
Args:
sess TF session
task Task ID
train_x Training images for the task
train_labels Labels in the task
            class_attr          Class attributes (only needed for zero-shot transfer)
Returns:
"""
if self.imp_method == 'VAN' or self.imp_method == 'PNN':
# We'll store the current parameters at the end of this function
pass
elif self.imp_method == 'EWC':
# Get the fisher at the end of a task
sess.run(self.get_fisher_at_minima)
# Normalize the fisher
sess.run([self.get_max_fisher_vars, self.get_min_fisher_vars])
sess.run([self.min_fisher, self.max_fisher, self.normalize_fisher_at_minima])
# Don't regularize over the attribute-embedding vectors
#sess.run(self.clear_attr_embed_reg)
# Reset the tmp fisher vars
sess.run(self.reset_tmp_fisher)
elif self.imp_method == 'M-EWC':
# Get the fisher at the end of a task
sess.run(self.get_fisher_at_minima)
# Refine Fisher based on the convergence info
sess.run(self.refine_fisher_at_minima)
# Normalize the fisher
sess.run([self.get_max_fisher_vars, self.get_min_fisher_vars])
sess.run([self.min_fisher, self.max_fisher, self.normalize_fisher_at_minima])
# Reset the tmp fisher vars
sess.run(self.reset_tmp_fisher)
# Reset the small_omega_vars
sess.run(self.reset_small_omega)
elif self.imp_method == 'PI':
# Update big omega variables
sess.run(self.update_big_omega)
# Reset the small_omega_vars because big_omega_vars are updated before it
sess.run(self.reset_small_omega)
elif self.imp_method == 'RWALK':
if task == 0:
# If first task then scale by a factor of 2, so that subsequent averaging does not hurt
sess.run(self.scale_score)
# Get the updated importance score
sess.run(self.update_score)
# Normalize the scores
sess.run([self.get_max_score_vars, self.get_min_score_vars])
sess.run([self.min_score, self.max_score, self.normalize_scores])
# Sparsify scores
"""
# TODO: Tmp remove this?
kp = 0.8 + (task*0.5)
if (kp > 1):
kp = 1.0
"""
#sess.run(self.sparsify_scores, feed_dict={self.keep_prob: kp})
# Get the fisher at the end of a task
sess.run(self.get_fisher_at_minima)
# Normalize fisher
sess.run([self.get_max_fisher_vars, self.get_min_fisher_vars])
sess.run([self.min_fisher, self.max_fisher, self.normalize_fisher_at_minima])
# Sparsify fisher
#sess.run(self.sparsify_fisher, feed_dict={self.keep_prob: kp})
# Store the weights
sess.run(self.weights_delta_old_grouped)
# Reset the small_omega_vars because big_omega_vars are updated before it
sess.run(self.reset_small_omega)
# Reset the big_omega_riemann because importance score is stored in the scores array
sess.run(self.big_omega_riemann_reset)
# Reset the tmp fisher vars
sess.run(self.reset_tmp_fisher)
elif self.imp_method == 'MAS':
# zero out any previous values
sess.run(self.reset_hebbian_scores)
if self.class_attr is not None:
# Define mask based on the class attributes
masked_class_attrs = np.zeros_like(class_attr)
masked_class_attrs[train_labels] = class_attr[train_labels]
# Logits mask
logit_mask = np.zeros(self.total_classes)
logit_mask[train_labels] = 1.0
# Loop over the entire training dataset to compute the parameter importance
batch_size = 10
num_samples = train_x.shape[0]
for iters in range(num_samples// batch_size):
offset = iters * batch_size
if self.class_attr is not None:
sess.run(self.accumulate_hebbian_scores, feed_dict={self.x: train_x[offset:offset+batch_size], self.keep_prob: 1.0,
self.class_attr: masked_class_attrs, self.output_mask: logit_mask, self.train_phase: False})
else:
sess.run(self.accumulate_hebbian_scores, feed_dict={self.x: train_x[offset:offset+batch_size], self.keep_prob: 1.0,
self.output_mask: logit_mask, self.train_phase: False})
# Average the hebbian scores across the training examples
sess.run(self.average_hebbian_scores, feed_dict={self.train_samples: num_samples})
# Store current weights
self.init_updates(sess)
def restore(self, sess):
"""
Restore the weights from the star variables
Args:
sess TF session
Returns:
"""
sess.run(self.restore_weights)
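# Hedged, framework-free sketch (not part of the original file): the A-GEM projection
# that create_stochastic_gem_ops above builds as TF ops, written with NumPy so the
# update rule is easy to verify in isolation.
def _agem_project_reference(task_grad, ref_grad):
    import numpy as np
    dotp = float(np.dot(task_grad, ref_grad))
    if dotp >= 0.0:
        # No conflict with the episodic-memory gradient: keep the task gradient as-is.
        return task_grad
    # Project out the conflicting component so the memory loss does not increase.
    return task_grad - (dotp / float(np.dot(ref_grad, ref_grad))) * ref_grad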
|
agem-main
|
model/model.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import List
import argparse
import numpy as np
import random
import torch
import torch.cuda
import sys
from src.torchrun_utils import init_distributed_mode_torchrun
from src import dist_utils, slurm, util
from src.index_io import load_or_initialize_index
from src.model_io import create_checkpoint_directories, load_or_initialize_atlas_model
from src.options import get_options
from train import train
import torch.distributed as dist
os.environ["TOKENIZERS_PARALLELISM"] = "true"
NCONTEXT: str = "40"
PBSZ: str = "1"
PRECISION: str = "bf16"
GOLD_SCORE_MODE: str = "ppmean"
GPU_MAX_LENGTH: str = "384"
GEN_MAX_LENGTH: str = "32"
EPSILON: str = "0.01"
SMALL_EPSILON: str = "4e-5"
DROPOUT: str = "0.1"
WARMUP_STEPS: str = "5"
EVAL_FREQ: str = "10"
LOG_FREQ: str = "5"
NO_REFRESH: str = "-1"
CHECK_FREQS: List[str] = ["--warmup_steps", "--save_freq", "--eval_freq"]
PORT: str = str(random.randrange(15000, 16000))
def get_argument_value(all_args: List[str], argument_name: str) -> int:
argument_idx = all_args.index(argument_name)
return int(all_args[argument_idx + 1])
def check_valid_input_params(all_args: List[str], total_steps: int) -> None:
for freq in CHECK_FREQS:
try:
arg_val = get_argument_value(all_args, freq)
        except ValueError:
            # The frequency flag is absent, so there is nothing to validate for it; skip
            # it rather than evaluating the assert below with an undefined arg_val.
            print(f"List does not contain value {freq}")
            continue
        assert arg_val < total_steps, f"The {freq} cannot be higher than the total steps {total_steps}."
def set_parser_options(parser: argparse.Namespace, passed_args: List[str]) -> argparse.ArgumentParser:
"""
Sets the default options for finetuning an Atlas model for a q&a task.
"""
total_steps = get_argument_value(passed_args, "--total_steps")
all_args = [
"--write_results",
"--train_retriever",
"--query_side_retriever_training",
"--use_gradient_checkpoint_reader",
"--use_gradient_checkpoint_retriever",
"--shard_optim",
"--shard_grads",
"--temperature_gold",
EPSILON,
"--temperature_score",
EPSILON,
"--refresh_index",
"-1",
"--dropout",
DROPOUT,
"--lr",
SMALL_EPSILON,
"--lr_retriever",
SMALL_EPSILON,
"--scheduler",
"linear",
"--weight_decay",
EPSILON,
"--generation_max_length",
GEN_MAX_LENGTH,
"--target_maxlength",
GEN_MAX_LENGTH,
"--gold_score_mode",
GOLD_SCORE_MODE,
"--precision",
PRECISION,
"--text_maxlength",
GPU_MAX_LENGTH,
"--per_gpu_batch_size",
PBSZ,
"--n_context",
NCONTEXT,
"--retriever_n_context",
NCONTEXT,
"--task",
"qa",
"--refresh_index",
NO_REFRESH,
"--warmup_steps",
WARMUP_STEPS,
"--save_freq",
str(total_steps - 1),
"--eval_freq",
EVAL_FREQ,
"--log_freq",
LOG_FREQ,
"--main_port",
PORT,
] + passed_args
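    # argparse keeps the last occurrence of a repeated flag, so anything in passed_args
    # (appended after the defaults above) overrides the corresponding default value.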
check_valid_input_params(all_args, total_steps)
return parser.parse_args(all_args)
if __name__ == "__main__":
options = get_options()
opt = set_parser_options(options.parser, sys.argv[1:])
torch.manual_seed(opt.seed)
if "TORCHELASTIC_RUN_ID" in os.environ:
init_distributed_mode_torchrun(opt)
torch.cuda.set_device(dist.get_rank())
else:
slurm.init_distributed_mode(opt)
slurm.init_signal_handler()
checkpoint_path, saved_index_path = create_checkpoint_directories(opt)
logger = util.init_logger(opt.is_main, opt.is_distributed, os.path.join(checkpoint_path, "run.log"))
if opt.is_main:
options.print_options(opt)
logger.info(f"world size: {dist_utils.get_world_size()}")
index, passages = load_or_initialize_index(opt)
model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt, step = load_or_initialize_atlas_model(opt)
if opt.is_distributed:
if opt.shard_grads:
import fairscale.nn.data_parallel
model.reader = fairscale.nn.data_parallel.ShardedDataParallel(
model.reader, optimizer, auto_refresh_trainable=False
)
if opt.train_retriever:
model.retriever = fairscale.nn.data_parallel.ShardedDataParallel(
model.retriever, retr_optimizer, auto_refresh_trainable=False
)
else:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[opt.local_rank],
output_device=opt.local_rank,
find_unused_parameters=True,
)
model._set_static_graph()
logger.info("Start finetuning")
dist_utils.barrier()
train(
model,
index,
passages,
optimizer,
scheduler,
retr_optimizer,
retr_scheduler,
step,
opt,
checkpoint_path,
)
|
atlas-main
|
finetune_qa.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
from collections import defaultdict
import numpy as np
import torch
import torch.cuda
import logging
from evaluate import evaluate
from src import dist_utils, slurm, util
from src.index_io import load_or_initialize_index, save_embeddings_and_index
from src.model_io import create_checkpoint_directories, load_or_initialize_atlas_model, save_atlas_model
from src.options import get_options
from src.tasks import get_task
os.environ["TOKENIZERS_PARALLELISM"] = "true"
GRAD_SCALE_UPPER_BOUND_MEAN: int = 1000
GRAD_SCALE_LOWER_BOUND_MEAN: float = 0.01
THRESHOLD_GRAD_STATS: int = 100
logger = logging.getLogger(__name__)
def train(
model,
index,
passages,
optimizer,
scheduler,
retr_optimizer,
retr_scheduler,
step,
opt,
checkpoint_path,
):
tb_logger = util.init_tb_logger(os.path.join(opt.checkpoint_dir, opt.name), is_main=opt.is_main)
run_stats = util.WeightedAvgStats()
unwrapped_model = util.get_unwrapped_model_if_wrapped(model)
# different seed for different sampling depending on global_rank
torch.manual_seed(opt.global_rank + opt.seed)
scale = 2.0
grad_stats = defaultdict(lambda: [])
task = get_task(opt, unwrapped_model.reader_tokenizer)
index_refresh_scheduler = util.IndexRefreshScheduler(
opt.refresh_index, opt.freeze_retriever_steps, opt.train_retriever
)
while step < opt.total_steps:
data_iterator = task.data_iterator(
opt.train_data, opt.global_rank, opt.world_size, repeat_if_less_than_world_size=True, opt=opt
)
data_iterator = filter(None, map(task.process, data_iterator))
data_iterator = task.batch_iterator(data_iterator, opt.per_gpu_batch_size, drop_last=True, shuffle=opt.shuffle)
for i, batch in enumerate(data_iterator):
iter_stats = {}
model.train()
if not opt.use_file_passages and index_refresh_scheduler.is_time_to_refresh(step):
                if not (step == 0 and opt.load_index_path is not None): # Don't refresh the index if it was just loaded
indexing_start = time.time()
unwrapped_model.build_index(index, passages, opt.per_gpu_embedder_batch_size, logger)
iter_stats["runtime/indexing"] = (time.time() - indexing_start, 1)
if opt.save_index_path is not None:
save_embeddings_and_index(index, opt)
step += 1
train_step_start = time.time()
reader_loss, retriever_loss = model(
index=index,
query=batch["query"],
target=batch["target"],
target_tokens=batch.get("target_tokens"),
passages=batch["passages"] if opt.use_file_passages else None,
batch_metadata=batch.get("metadata"),
filtering_fun=task.filter,
train_retriever=opt.train_retriever and step > opt.freeze_retriever_steps,
iter_stats=iter_stats,
)
if retriever_loss is not None and opt.train_retriever:
train_loss = reader_loss.float() + retriever_loss
else:
train_loss = reader_loss
iter_stats["loss/train_loss"] = (train_loss.item(), len(batch["query"]))
backward_start = time.time()
train_loss = scale * train_loss
train_loss.backward()
iter_stats["runtime/backward"] = (time.time() - backward_start, 1)
model_update_start = time.time()
stats = util.compute_grad_stats(model)
if stats["skip_example"]:
model.zero_grad()
# continue
else:
for k, v in stats.items():
grad_stats[k].append(v)
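                # Adjust the loss scale from the accumulated gradient statistics: once
                # THRESHOLD_GRAD_STATS steps have been collected, halve the scale if the
                # averaged max gradient exceeds GRAD_SCALE_UPPER_BOUND_MEAN and double it
                # if the averaged mean gradient falls below GRAD_SCALE_LOWER_BOUND_MEAN.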
if len(grad_stats["max"]) >= THRESHOLD_GRAD_STATS:
if np.mean(grad_stats["max"]) > GRAD_SCALE_UPPER_BOUND_MEAN:
scale /= 2
elif np.mean(grad_stats["mean"]) < GRAD_SCALE_LOWER_BOUND_MEAN:
scale *= 2
# print(f'Scale: {scale}')
grad_stats.clear()
if step % opt.accumulation_steps == 0 and not stats["skip_example"]:
if opt.is_distributed and opt.shard_optim:
optimizer.clip_grad_norm(scale * opt.clip)
if opt.train_retriever:
retr_optimizer.clip_grad_norm(scale * opt.clip)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), scale * opt.clip)
optimizer.step(scale=scale)
scheduler.step()
if opt.train_retriever:
retr_optimizer.step(scale=scale)
retr_scheduler.step()
model.zero_grad()
iter_stats["runtime/model_update"] = (time.time() - model_update_start, 1)
iter_stats["runtime/train_step"] = (time.time() - train_step_start, 1)
run_stats.update(iter_stats)
if step % opt.log_freq == 0:
log = f"{step} / {opt.total_steps}"
for k, v in sorted(run_stats.average_stats.items()):
log += f" | {k}: {v:.3g}"
if tb_logger:
tb_logger.add_scalar(k, v, step)
log += f" | lr: {scheduler.get_last_lr()[0]:0.2g}"
log += f" | Memory: {torch.cuda.max_memory_allocated()//1e9} GiB"
if tb_logger:
tb_logger.add_scalar("lr", scheduler.get_last_lr()[0], step)
logger.info(log)
run_stats.reset()
if step % opt.eval_freq == 0:
for data_path in opt.eval_data:
dataset_name = os.path.basename(data_path)
metrics = evaluate(model, index, opt, data_path, step)
log_message = f"Dataset: {dataset_name}"
for k, v in metrics.items():
log_message += f" | {v:.3f} {k}"
if tb_logger:
tb_logger.add_scalar(f"{dataset_name}/{k}", v, step)
logger.info(log_message)
if step % opt.save_freq == 0:
save_atlas_model(
unwrapped_model,
optimizer,
scheduler,
retr_optimizer,
retr_scheduler,
step,
opt,
checkpoint_path,
f"step-{step}",
)
if step > opt.total_steps:
exit()
if __name__ == "__main__":
options = get_options()
opt = options.parse()
torch.manual_seed(opt.seed)
slurm.init_distributed_mode(opt)
slurm.init_signal_handler()
checkpoint_path, saved_index_path = create_checkpoint_directories(opt)
logger = util.init_logger(opt.is_main, opt.is_distributed, os.path.join(checkpoint_path, "run.log"))
if opt.is_main:
options.print_options(opt)
logger.info(f"world size: {dist_utils.get_world_size()}")
index, passages = load_or_initialize_index(opt)
model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt, step = load_or_initialize_atlas_model(opt)
if opt.is_distributed:
if opt.shard_grads:
import fairscale.nn.data_parallel
model.reader = fairscale.nn.data_parallel.ShardedDataParallel(
model.reader, optimizer, auto_refresh_trainable=False
)
if opt.train_retriever:
model.retriever = fairscale.nn.data_parallel.ShardedDataParallel(
model.retriever, retr_optimizer, auto_refresh_trainable=False
)
else:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[opt.local_rank],
output_device=opt.local_rank,
find_unused_parameters=True,
)
model._set_static_graph()
logger.info("Start training")
dist_utils.barrier()
train(
model,
index,
passages,
optimizer,
scheduler,
retr_optimizer,
retr_scheduler,
step,
opt,
checkpoint_path,
)
|
atlas-main
|
train.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
from collections import defaultdict
import numpy as np
import torch
import torch.cuda
import torch.distributed as dist
from src import dist_utils, slurm, util
from src.index_io import load_or_initialize_index, save_embeddings_and_index
from src.model_io import create_checkpoint_directories, load_or_initialize_atlas_model
from src.options import get_options
from src.tasks import get_task
os.environ["TOKENIZERS_PARALLELISM"] = "true"
def _get_eval_data_iterator(opt, data_path, task):
data_iterator = task.data_iterator(data_path, opt.global_rank, opt.world_size, opt=opt, is_eval=True)
data_iterator = filter(None, map(task.process, data_iterator))
data_iterator = list(task.batch_iterator(data_iterator, opt.per_gpu_batch_size))
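    # Every rank must run the same number of eval steps, otherwise the collective ops
    # below would deadlock; shorter shards are therefore padded with empty batches,
    # which the evaluation loops skip.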
if dist.is_initialized():
len_data = torch.tensor(len(data_iterator), device=torch.device("cuda"))
dist.all_reduce(len_data, torch.distributed.ReduceOp.MAX)
dist.barrier()
if len(data_iterator) < len_data.item():
data_iterator.extend([{} for _ in range(len_data.item() - len(data_iterator))])
return data_iterator
@torch.no_grad()
def run_retrieval_only(model, index, opt, data_path, step=None):
model.eval()
metrics = defaultdict(lambda: [])
dataset_wpred = []
unwrapped_model = util.get_unwrapped_model_if_wrapped(model)
reader_tokenizer = unwrapped_model.reader_tokenizer
task = get_task(opt, reader_tokenizer)
data_iterator = _get_eval_data_iterator(opt, data_path, task)
for i, batch in enumerate(data_iterator):
query = batch.get("query", [""])
answers = batch.get("target", [""])
batch_metadata = batch.get("metadata")
query_enc = model.retriever_tokenize(query)
retrieved_passages, _ = unwrapped_model.retrieve(
index,
opt.n_context,
query,
query_enc["input_ids"].cuda(),
query_enc["attention_mask"].cuda(),
batch_metadata=batch_metadata,
filtering_fun=task.filter,
)
# If example is a padding example then skip step
if (len(query) == 0) or (len(query[0]) == 0):
continue
for k in range(len(retrieved_passages)):
if opt.write_results:
                gold = [answers[k]] if "answers" not in batch else batch["answers"][k]
ex = {"query": query[k], "answers": gold, "passages": retrieved_passages[k]}
if batch_metadata is not None:
ex["metadata"] = batch_metadata[k]
if "id" in batch:
ex["id"] = batch["id"][k]
dataset_wpred.append(ex)
if opt.write_results:
dataset_name, _ = os.path.splitext(os.path.basename(data_path))
dataset_name = f"{dataset_name}-step-{step}"
util.save_distributed_dataset(dataset_wpred, dataset_name, opt)
return metrics
@torch.no_grad()
def evaluate(model, index, opt, data_path, step=None):
model.eval()
metrics = defaultdict(lambda: [])
dataset_wpred = []
unwrapped_model = util.get_unwrapped_model_if_wrapped(model)
reader_tokenizer = unwrapped_model.reader_tokenizer
task = get_task(opt, reader_tokenizer)
data_iterator = _get_eval_data_iterator(opt, data_path, task)
for i, batch in enumerate(data_iterator):
query = batch.get("query", [""])
answers = batch.get("target", [""])
batch_metadata = batch.get("metadata")
target_tokens = batch.get("target_tokens")
query_enc, labels, decoder_input_ids = unwrapped_model.tokenize(query, answers, target_tokens=target_tokens)
if not opt.use_file_passages:
query_ids_retriever = query_enc["input_ids"].cuda()
query_mask_retriever = query_enc["attention_mask"].cuda()
retrieved_passages, _ = unwrapped_model.retrieve(
index,
opt.n_context,
query,
query_ids_retriever,
query_mask_retriever,
batch_metadata=batch_metadata,
filtering_fun=task.filter,
)
else:
assert "passages" in batch, "cant use use_file_passages mode without passing in passages"
retrieved_passages = [p[: opt.n_context] for p in batch["passages"]]
# If example is a padding example then skip step
if (len(query) == 0) or (len(query[0]) == 0):
continue
reader_tokens, _ = unwrapped_model.tokenize_passages(query, retrieved_passages)
if "eval_loss" in task.metrics:
eval_loss, logits = unwrapped_model.compute_reader_loss_and_logits(reader_tokens, decoder_input_ids, labels)
metrics["eval_loss"].append(eval_loss)
generation = unwrapped_model.generate(
reader_tokens, query, choices=batch["choices"] if "choices" in batch else None
)
for k, g in enumerate(generation):
if opt.decoder_prompt_format is not None:
query_ids = reader_tokenizer.encode(
opt.decoder_prompt_format.format_map({"query": query[k]}), add_special_tokens=False
)
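                # Drop the encoded prompt tokens (plus one leading token) from the
                # generation so that only the model's continuation is decoded below.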
g = g[len(query_ids) + 1 :]
pred = reader_tokenizer.decode(g, skip_special_tokens=True)
            gold = [answers[k]] if "answers" not in batch else batch["answers"][k]
sample_metrics = task.evaluation(pred, gold)
for key, value in sample_metrics.items():
metrics[key].append(value)
if opt.write_results:
ex = {"query": query[k], "answers": gold, "generation": pred}
if not opt.dont_write_passages:
ex["passages"] = retrieved_passages[k]
if batch_metadata is not None:
ex["metadata"] = batch_metadata[k]
if opt.task == "multiple_choice":
ex["choice_logits"] = task.get_choice_logits(logits[k])
if "id" in batch:
ex["id"] = batch["id"][k]
dataset_wpred.append(ex)
metrics, dataset_wpred = task.evaluation_postprocessing(metrics, dataset_wpred)
metrics = util.avg_dist_dict(task.metrics, metrics)
metrics = {key: value if key == "eval_loss" else 100 * value for key, value in metrics.items()}
if opt.write_results:
dataset_name, _ = os.path.splitext(os.path.basename(data_path))
dataset_name = f"{dataset_name}-step-{step}"
util.save_distributed_dataset(dataset_wpred, dataset_name, opt)
return metrics
if __name__ == "__main__":
options = get_options()
opt = options.parse()
torch.manual_seed(opt.seed)
slurm.init_distributed_mode(opt)
slurm.init_signal_handler()
checkpoint_path, saved_index_path = create_checkpoint_directories(opt)
logger = util.init_logger(opt.is_main, opt.is_distributed, os.path.join(checkpoint_path, "run.log"))
if opt.is_main:
options.print_options(opt)
logger.info(f"world size: {dist_utils.get_world_size()}")
index, passages = load_or_initialize_index(opt)
model, _, _, _, _, opt, step = load_or_initialize_atlas_model(opt, eval_only=True)
logger.info("Start Evaluation")
dist_utils.barrier()
if not opt.use_file_passages and opt.load_index_path is None:
indexing_start = time.time()
model.build_index(index, passages, opt.per_gpu_embedder_batch_size, logger)
if opt.save_index_path is not None:
save_embeddings_and_index(index, opt)
for data_path in opt.eval_data:
dataset_name = os.path.basename(data_path)
logger.info(f"Start Evaluation on {data_path}")
if opt.retrieve_only:
run_retrieval_only(model, index, opt, data_path, step)
else:
metrics = evaluate(model, index, opt, data_path, step)
log_message = f"Dataset: {dataset_name}"
for k, v in metrics.items():
log_message += f" | {v:.3f} {k}"
logger.info(log_message)
|
atlas-main
|
evaluate.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
SUBCATEGORIES = {
"humanities": [
"high_school_european_history",
"high_school_us_history",
"high_school_world_history",
"prehistory",
"formal_logic",
"logical_fallacies",
"moral_disputes",
"moral_scenarios",
"philosophy",
"world_religions",
"international_law",
"jurisprudence",
"professional_law",
],
"Soc Sci.": [
"high_school_government_and_politics",
"public_relations",
"security_studies",
"us_foreign_policy",
"human_sexuality",
"sociology",
"econometrics",
"high_school_macroeconomics",
"high_school_microeconomics",
"high_school_geography",
"high_school_psychology",
"professional_psychology",
],
"STEM": [
"astronomy",
"college_physics",
"conceptual_physics",
"high_school_physics",
"college_chemistry",
"high_school_chemistry",
"college_biology",
"high_school_biology",
"college_computer_science",
"computer_security",
"high_school_computer_science",
"machine_learning",
"abstract_algebra",
"college_mathematics",
"elementary_mathematics",
"high_school_mathematics",
"high_school_statistics",
"electrical_engineering",
],
"other": [
"global_facts",
"miscellaneous",
"professional_accounting",
"business_ethics",
"management",
"marketing",
"anatomy",
"clinical_knowledge",
"college_medicine",
"human_aging",
"medical_genetics",
"nutrition",
"professional_medicine",
"virology",
],
"all": [
"abstract_algebra",
"anatomy",
"astronomy",
"business_ethics",
"clinical_knowledge",
"college_biology",
"college_chemistry",
"college_computer_science",
"college_mathematics",
"college_medicine",
"college_physics",
"computer_security",
"conceptual_physics",
"econometrics",
"electrical_engineering",
"elementary_mathematics",
"formal_logic",
"global_facts",
"high_school_biology",
"high_school_chemistry",
"high_school_computer_science",
"high_school_european_history",
"high_school_geography",
"high_school_government_and_politics",
"high_school_macroeconomics",
"high_school_mathematics",
"high_school_microeconomics",
"high_school_physics",
"high_school_psychology",
"high_school_statistics",
"high_school_us_history",
"high_school_world_history",
"human_aging",
"human_sexuality",
"international_law",
"jurisprudence",
"logical_fallacies",
"machine_learning",
"management",
"marketing",
"medical_genetics",
"miscellaneous",
"moral_disputes",
"moral_scenarios",
"nutrition",
"philosophy",
"prehistory",
"professional_accounting",
"professional_law",
"professional_medicine",
"professional_psychology",
"public_relations",
"security_studies",
"sociology",
"us_foreign_policy",
"virology",
"world_religions",
],
}
def load_predictions_file(file):
predictions = {}
for line in open(file):
dp = json.loads(line)
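        # Some prediction files store the permutations under a misspelled key
        # ("permuatations"); normalize it to the expected key before reading it.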
if "permuatations" in dp:
dp["permutations"] = dp["permuatations"]
original = [p for p in dp["permutations"] if p["metadata"]["is_original"]][0]
dataset = original["metadata"]["dataset"].replace("_test", "").replace("_valid", "")
uuid = original["metadata"]["question"] + str(original["metadata"]["options"])
original_prediction = max(original["choice_logits"].items(), key=lambda x: x[1])[0]
debiased_prediction = dp["generation"]
predictions.setdefault(dataset, {})[uuid] = {
"prediction": original_prediction,
"debiased_prediction": debiased_prediction,
}
return predictions
def load_predictions(path, step=None, split=None):
if os.path.isdir(path):
predictions = {}
for domain in os.listdir(path):
predictions_path = os.path.join(path, domain, f"{domain}.{split}-step-{step}.jsonl")
if not os.path.exists(predictions_path):
raise ValueError(f"{predictions_path} expected but missing")
predictions.update(load_predictions_file(predictions_path))
else:
predictions = load_predictions_file(path)
return predictions
def load_gold_file(file):
gold = {}
for line in open(file):
dp = json.loads(line)
dataset = dp["dataset"].replace("_test", "").replace("_valid", "")
uuid = dp["question"] + str(dp["options"])
gold_answer = dp["answer"]
gold.setdefault(dataset, {})[uuid] = gold_answer
return gold
def score_categories(gold_answers, predictions, categories):
acc = []
debiased_acc = []
for cat in categories:
preds = predictions[cat]
golds = gold_answers[cat]
for question in golds.keys():
pred = preds[question]
gold = golds[question]
acc.append(pred["prediction"] == gold)
debiased_acc.append(pred["debiased_prediction"] == gold)
acc = sum(acc) / len(acc)
debiased_acc = sum(debiased_acc) / len(debiased_acc)
return acc, debiased_acc
def main(predictions_file, gold_file, step=None, split=None):
print(f"predictions for {predictions_file}")
print(f"{'category': >15}\t{'Acc(%)':>15}\t{'Debias Acc(%)':>15}")
predictions = load_predictions(predictions_file, step, split)
gold_answers = load_gold_file(gold_file)
print("-" * 47)
for category_name, categories in SUBCATEGORIES.items():
scores, debiased_scores = score_categories(gold_answers, predictions, categories)
sc, db = f"{100*scores:0.2f}", f"{100*debiased_scores:0.2f}"
print(f"{category_name: >15}\t{sc:>15}\t{db:>15}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--predictions_path",
type=str,
help="Path to the written predictions file",
)
parser.add_argument(
"--gold_path",
type=str,
help="Path to the written predictions file (zero-shot, 5-shot multi, full) or directory containing models (5-shot)",
)
parser.add_argument(
"--step",
type=int,
default=16,
help="only for 5-shot, specify the step to evaluate",
)
parser.add_argument(
"--split",
type=str,
default="valid",
help="only for 5-shot, specify the split to evaluate",
)
args = parser.parse_args()
main(args.predictions_path, args.gold_path, step=args.step, split=args.split)
|
atlas-main
|
evaluation_scripts/evaluate_mmlu_predictions.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
from pathlib import Path
import argparse
import shutil
import tarfile
from download_tools import maybe_download_file
# random 64 examples used with Atlas
nq_64shot = [
27144,
14489,
49702,
38094,
6988,
60660,
65643,
48249,
48085,
52629,
48431,
7262,
34659,
24332,
44839,
17721,
50819,
62279,
37021,
77405,
52556,
23802,
40974,
64678,
69673,
77277,
18419,
25635,
1513,
11930,
5542,
13453,
52754,
65663,
67400,
42409,
74541,
33159,
65445,
28572,
74069,
7162,
19204,
63509,
12244,
48532,
72778,
37507,
70300,
29927,
18186,
27579,
58411,
63559,
4347,
59383,
57392,
42014,
77920,
45592,
32321,
3422,
61041,
34051,
]
# random 64 examples used with Atlas
triviaqa_64shot = [
75927,
38807,
452,
68095,
44621,
34592,
36091,
65286,
56484,
48197,
34692,
28011,
16670,
62641,
37865,
6658,
45724,
37527,
17740,
31133,
8010,
48573,
53670,
15514,
25996,
54404,
10739,
55105,
66122,
73324,
41202,
71253,
41258,
51344,
60092,
50455,
65078,
36169,
33408,
55106,
40526,
65582,
66337,
39766,
77174,
17289,
7367,
50930,
21151,
21809,
52804,
26110,
54414,
73358,
11459,
66019,
41084,
13349,
39059,
6626,
25540,
15110,
53320,
61313,
]
def convert_triviaqa(ex):
target = ex["Answer"]["Value"]
if target.isupper():
target = target.title()
return {
"question": ex["Question"],
"answers": ex["Answer"]["Aliases"],
"target": target,
}
def convert_nq(ex):
return {"question": ex["question"], "answers": ex["answer"]}
def preprocess_triviaqa(orig_dir, output_dir, index_dir):
data, index = {}, {}
for split in ["train", "dev", "test"]:
with open(index_dir / ("TQA." + split + ".idx.json"), "r") as fin:
index[split] = json.load(fin)
with open(orig_dir / "triviaqa-unfiltered" / "unfiltered-web-train.json") as fin:
originaltrain = json.load(fin)["Data"]
with open(orig_dir / "triviaqa-unfiltered" / "unfiltered-web-dev.json") as fin:
originaldev = json.load(fin)["Data"]
data["train"] = [convert_triviaqa(originaltrain[k]) for k in index["train"]]
data["train.64-shot"] = [convert_triviaqa(originaltrain[k]) for k in triviaqa_64shot]
data["dev"] = [convert_triviaqa(originaltrain[k]) for k in index["dev"]]
data["test"] = [convert_triviaqa(originaldev[k]) for k in index["test"]]
for split in data:
with open(output_dir / (split + ".jsonl"), "w") as fout:
for ex in data[split]:
json.dump(ex, fout, ensure_ascii=False)
fout.write("\n")
def preprocess_nq(orig_dir, output_dir, index_dir):
data, index = {}, {}
for split in ["train", "dev", "test"]:
with open(index_dir / ("NQ." + split + ".idx.json"), "r") as fin:
index[split] = json.load(fin)
originaltrain, originaldev = [], []
with open(orig_dir / "NQ-open.dev.jsonl") as fin:
for k, example in enumerate(fin):
example = json.loads(example)
originaldev.append(example)
with open(orig_dir / "NQ-open.train.jsonl") as fin:
for k, example in enumerate(fin):
example = json.loads(example)
originaltrain.append(example)
data["train"] = [convert_nq(originaltrain[k]) for k in index["train"]]
data["train.64-shot"] = [convert_nq(originaltrain[k]) for k in nq_64shot]
data["dev"] = [convert_nq(originaltrain[k]) for k in index["dev"]]
data["test"] = [convert_nq(originaldev[k]) for k in index["test"]]
for split in data:
with open(output_dir / (split + ".jsonl"), "w") as fout:
for ex in data[split]:
json.dump(ex, fout, ensure_ascii=False)
fout.write("\n")
def main(args):
output_dir = Path(args.output_directory)
index_tar = output_dir / "index.tar"
index_dir = output_dir / "dataindex"
original_triviaqa_dir = output_dir / "original_triviaqa"
triviaqa_dir = output_dir / "triviaqa_data"
triviaqa_tar = output_dir / "triviaqa_data.tar"
nq_dir = output_dir / "nq_data"
original_nq_dir = output_dir / "original_naturalquestions"
if args.overwrite:
print("Overwriting NaturalQuestions and TriviaQA")
download_triviaqa = True
download_nq = True
else:
download_triviaqa = not triviaqa_dir.exists()
download_nq = not nq_dir.exists()
if download_triviaqa or download_nq:
index_url = "https://dl.fbaipublicfiles.com/FiD/data/dataindex.tar.gz"
maybe_download_file(index_url, index_tar)
if not os.path.exists(index_dir):
with tarfile.open(index_tar) as tar:
tar.extractall(index_dir)
if download_triviaqa:
triviaqa_dir.mkdir(parents=True, exist_ok=True)
original_triviaqa_url = "http://nlp.cs.washington.edu/triviaqa/data/triviaqa-unfiltered.tar.gz"
maybe_download_file(original_triviaqa_url, triviaqa_tar)
if not os.path.exists(original_triviaqa_dir):
with tarfile.open(triviaqa_tar) as tar:
tar.extractall(original_triviaqa_dir)
preprocess_triviaqa(original_triviaqa_dir, triviaqa_dir, index_dir)
else:
print("TriviaQA data already exists, not overwriting")
if download_nq:
nq_dir.mkdir(parents=True, exist_ok=True)
nq_dev_url = "https://raw.githubusercontent.com/google-research-datasets/natural-questions/master/nq_open/NQ-open.dev.jsonl"
nq_train_url = "https://raw.githubusercontent.com/google-research-datasets/natural-questions/master/nq_open/NQ-open.train.jsonl"
maybe_download_file(nq_dev_url, original_nq_dir / "NQ-open.dev.jsonl")
maybe_download_file(nq_train_url, original_nq_dir / "NQ-open.train.jsonl")
preprocess_nq(original_nq_dir, nq_dir, index_dir)
else:
print("NaturalQuestions data already exists, not overwriting")
triviaqa_tar.unlink(missing_ok=True)
index_tar.unlink(missing_ok=True)
if original_triviaqa_dir.exists():
shutil.rmtree(original_triviaqa_dir)
if original_nq_dir.exists():
shutil.rmtree(original_nq_dir)
if index_dir.exists():
shutil.rmtree(index_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_directory",
type=str,
default="./data/",
help="Path to the file to which the dataset is written.",
)
parser.add_argument("--overwrite", action="store_true", help="Overwrite data")
args = parser.parse_args()
main(args)
|
atlas-main
|
preprocessing/prepare_qa.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
from pathlib import Path
from download_tools import maybe_download_file
URLS = {
"train": "https://storage.googleapis.com/gresearch/templama/train.json",
"valid": "https://storage.googleapis.com/gresearch/templama/val.json",
"test": "https://storage.googleapis.com/gresearch/templama/test.json",
}
def prep_question(question):
return question.replace("_X_", "<extra_id_0>")
def maybe_download_data(output_directory):
paths = {}
for split, url in URLS.items():
dest = output_directory / f"{split}.original.jsonl"
maybe_download_file(url, dest)
paths[split] = dest
return paths
def _parse(path, years_to_parse):
items = []
for line in open(path):
if line.strip() != "":
items.append(json.loads(line))
mapper = {}
for i in items:
if i["date"] in years_to_parse:
mapper.setdefault(i["query"], []).append(i)
return mapper
def _dump(output_path, objects_to_write):
with open(output_path, "w") as f:
for item in objects_to_write:
f.write(json.dumps(item) + "\n")
def _get_export_obj(obj):
return {
"question": prep_question(obj["query"]),
"answers": list(set([n["name"] for n in obj["answer"]])),
"metadata": {"original_instance": obj},
}
def main(output_directory, years_to_compare=["2017", "2020"]):
os.makedirs(output_directory, exist_ok=True)
paths = maybe_download_data(output_directory)
for split, path in paths.items():
to_write = {y: [] for y in years_to_compare}
query2items = _parse(path, years_to_compare)
for _, objects in query2items.items():
            if len(objects) == 1: # question doesn't have different answers at different years
continue
first_answer, later_answers = objects[0], objects[1:]
previous_answer_strings = set([n["name"] for n in first_answer["answer"]])
different_later_answers = []
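            # Keep a later answer only when none of its entities already appear in the
            # earliest year's answer set, i.e. the fact genuinely changed between dates.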
for later_answer in later_answers:
if all([n["name"] not in previous_answer_strings for n in later_answer["answer"]]):
different_later_answers.append(later_answer)
if len(different_later_answers) > 0:
to_write[first_answer["date"]].append(_get_export_obj(first_answer))
for d in different_later_answers:
to_write[d["date"]].append(_get_export_obj(d))
for date, items in to_write.items():
output_path = output_directory / f"temp_lama.{split}.{date}.jsonl"
_dump(output_path, items)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_directory",
type=str,
default="./data",
help="Path to the file to which the dataset is written.",
)
args = parser.parse_args()
output_directory = Path(args.output_directory) / "data" / "templama_data"
main(output_directory)
|
atlas-main
|
preprocessing/prepare_templama.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from download_tools import get_download_path, get_s3_path, maybe_download_file
BASE_URL = "https://dl.fbaipublicfiles.com/atlas"
PASSAGE_FNAME = "passages.{shard}.pt"
EMBEDDING_FNAME = "embeddings.{shard}.pt"
N_SHARDS = 128
AVAILABLE_INDICES = [
{
"index": "indices/atlas/wiki/xxl",
"description": "Precomputed index for the wiki-dec2018 corpus for the pretrained atlas xxl model",
},
{
"index": "indices/atlas/wiki/xl",
"description": "Precomputed index for the wiki-dec2018 corpus for the pretrained atlas xl model",
},
{
"index": "indices/atlas/wiki/large",
"description": "Precomputed index for the wiki-dec2018 corpus for the pretrained atlas large model",
},
{
"index": "indices/atlas/wiki/base",
"description": "Precomputed index for the wiki-dec2018 corpus for the pretrained atlas base model",
},
{
"index": "indices/atlas_nq/wiki/xxl",
"description": "Precomputed index for the wiki-dec2018 corpus for the natural-questions-finetuned atlas xxl model",
},
{
"index": "indices/atlas_nq/wiki/xl",
"description": "Precomputed index for the wiki-dec2018 corpus for the natural-questions-finetuned atlas xl model",
},
{
"index": "indices/atlas_nq/wiki/large",
"description": "Precomputed index for the wiki-dec2018 corpus for the natural-questions-finetuned atlas large model",
},
{
"index": "indices/atlas_nq/wiki/base",
"description": "Precomputed index for the wiki-dec2018 corpus for the natural-questions-finetuned atlas base model",
},
]
def _helpstr():
helpstr = "The following indices are available for download: "
for m in AVAILABLE_INDICES:
helpstr += f'\nIndex name: {m["index"]:<30} Description: {m["description"]}'
helpstr += "\nDownload by passing --index {index name}"
return helpstr
def get_passage_path(index, shard_number):
passage_filename = PASSAGE_FNAME.format(shard=shard_number)
return f"{index}/{passage_filename}"
def get_embedding_path(index, shard_number):
embedding_filename = EMBEDDING_FNAME.format(shard=shard_number)
return f"{index}/{embedding_filename}"
def main(output_directory, requested_index):
for shard in range(N_SHARDS):
passage_path = get_passage_path(requested_index, shard)
source = get_s3_path(passage_path)
target = get_download_path(output_directory, passage_path)
maybe_download_file(source, target)
embedding_path = get_embedding_path(requested_index, shard)
source = get_s3_path(embedding_path)
target = get_download_path(output_directory, embedding_path)
maybe_download_file(source, target)
if __name__ == "__main__":
help_str = _helpstr()
choices = list([a["index"] for a in AVAILABLE_INDICES])
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--output_directory",
type=str,
default="./data",
help="Path to the file to which the dataset is written.",
)
parser.add_argument(
"--index",
type=str,
choices=choices,
help=help_str,
)
args = parser.parse_args()
main(args.output_directory, args.index)
|
atlas-main
|
preprocessing/download_index.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import wget
BASE_URL = "https://dl.fbaipublicfiles.com/atlas"
def maybe_download_file(source, target):
if not os.path.exists(target):
os.makedirs(os.path.dirname(target), exist_ok=True)
print(f"Downloading {source} to {target}")
wget.download(source, out=str(target))
print()
def get_s3_path(path):
return f"{BASE_URL}/{path}"
def get_download_path(output_dir, path):
return os.path.join(output_dir, path)
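# Usage sketch (assumption, not part of the original file): compose the helpers above to
# mirror a single remote artifact locally; the relative path below is taken from the model
# list in preprocessing/download_model.py and may need adjusting.
if __name__ == "__main__":
    example_path = "models/atlas/base/model.pth.tar"
    source = get_s3_path(example_path)
    target = get_download_path("./data", example_path)
    maybe_download_file(source, target)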
|
atlas-main
|
preprocessing/download_tools.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
from pathlib import Path
import argparse
from download_tools import maybe_download_file
fever_64shot = [
23236,
131610,
70907,
110333,
83874,
121714,
17355,
115320,
9907,
42725,
43614,
139489,
30589,
76963,
5916,
7241,
68848,
59902,
113855,
110886,
102332,
79223,
24359,
105929,
131435,
118883,
8152,
119911,
28803,
111318,
29503,
43420,
39533,
15214,
29807,
29242,
10288,
111860,
77451,
102160,
77982,
132435,
2875,
47721,
92378,
128574,
24721,
83985,
41521,
97851,
137243,
74916,
85056,
135,
130085,
19233,
2887,
124345,
91769,
63969,
50865,
135928,
143220,
124300,
]
def main(args):
output_dir = Path(args.output_directory)
fever_path, fever_url = {}, {}
fever_dir = output_dir / "fever_data"
fever_path["train"] = fever_dir / "train.jsonl"
fever_path["train-64"] = fever_dir / "train-64.jsonl"
fever_path["dev"] = fever_dir / "dev.jsonl"
fever_path["test"] = fever_dir / "test.jsonl"
fever_url["train"] = "https://fever.ai/download/fever/train.jsonl"
fever_url["dev"] = "https://fever.ai/download/fever/shared_task_dev.jsonl"
fever_url["test"] = "https://fever.ai/download/fever/shared_task_test.jsonl"
for split in ["train", "dev", "test"]:
if args.overwrite or not fever_path[split].exists():
maybe_download_file(fever_url[split], fever_path[split])
else:
print(f"{split} file already exists, not overwriting, use --overwrite instead")
with open(fever_path["train"]) as fin:
with open(fever_path["train-64"], "w") as fout:
for k, line in enumerate(fin):
if k in fever_64shot:
ex = json.loads(line)
json.dump(ex, fout)
fout.write("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_directory",
type=str,
default="./data/",
help="Path to the file to which the dataset is written.",
)
parser.add_argument("--overwrite", action="store_true", help="Overwrite data")
args = parser.parse_args()
main(args)
|
atlas-main
|
preprocessing/prepare_fever.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from download_tools import get_download_path, get_s3_path, maybe_download_file
BASE_URL = "https://dl.fbaipublicfiles.com/atlas"
MODEL_FILE_NAME = "model.pth.tar"
AVAILABLE_MODELS = [
{"model": "models/atlas/xxl", "description": "Pretrained Atlas XXL model"},
{"model": "models/atlas/xl", "description": "Pretrained Atlas XL model"},
{"model": "models/atlas/large", "description": "Pretrained Atlas Large model"},
{"model": "models/atlas/base", "description": "Pretrained Atlas Base model"},
{"model": "models/atlas_nq/xxl", "description": "Atlas XXL model, finetuned on Natural Questions"},
{"model": "models/atlas_nq/xl", "description": "Atlas XL model, finetuned on Natural Questions"},
{"model": "models/atlas_nq/large", "description": "Atlas large model, finetuned on Natural Questions"},
{"model": "models/atlas_nq/base", "description": "Atlas base model, finetuned on Natural Questions"},
]
def _helpstr():
helpstr = "The following models are available for download: "
for m in AVAILABLE_MODELS:
helpstr += f'\nModel name: {m["model"]:<30} Description: {m["description"]}'
helpstr += "\ndownload by passing --model {model name}"
return helpstr
def main(output_directory, requested_model):
model_path = f"{requested_model}/{MODEL_FILE_NAME}"
source = get_s3_path(model_path)
target = get_download_path(output_directory, model_path)
maybe_download_file(source, target)
if __name__ == "__main__":
help_str = _helpstr()
choices = list([a["model"] for a in AVAILABLE_MODELS])
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--output_directory",
type=str,
default="./data",
help="Path to the file to which the dataset is written.",
)
parser.add_argument(
"--model",
type=str,
choices=choices,
help=help_str,
)
args = parser.parse_args()
main(args.output_directory, args.model)
|
atlas-main
|
preprocessing/download_model.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import csv
import json
import os
import random
import tarfile
from pathlib import Path
from download_tools import maybe_download_file
DATA_URL = "https://people.eecs.berkeley.edu/~hendrycks/data.tar"
def maybe_download_data(output_directory):
os.makedirs(output_directory, exist_ok=True)
# download tar:
orig_data_tar = output_directory / "data.tar"
maybe_download_file(DATA_URL, orig_data_tar)
untarred_orig_data = Path(output_directory) / "data"
if not os.path.exists(untarred_orig_data):
with tarfile.open(orig_data_tar) as tar:
tar.extractall(output_directory)
return untarred_orig_data
def build_mmlu_instance(name, line):
question, option_a, option_b, option_c, option_d, answer = line
return {
"question": question,
"options": {"A": option_a, "B": option_b, "C": option_c, "D": option_d},
"answer": answer,
"dataset": name,
}
def get_dataset_name_from_path(path):
return os.path.basename(path).replace(".csv", "")
def parse_mmlu_csv(path):
output = []
name = get_dataset_name_from_path(path)
with open(path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
for line in csv_reader:
obj = build_mmlu_instance(name, line)
output.append(obj)
return output
def parse_all_mmlu_data(directory):
all_data = {}
for split in ["auxiliary_train", "dev", "val", "test"]:
for fi in os.listdir(directory / split):
path_to_read = directory / split / fi
name = get_dataset_name_from_path(path_to_read)
all_data.setdefault(split, {})[name] = parse_mmlu_csv(path_to_read)
return all_data
def dump(items, path):
with open(path, "w") as f:
for item in items:
f.write(json.dumps(item) + "\n")
def make_five_shot_data(data, output_directory):
indiv_train_path = output_directory / "individual_train"
indiv_valid_path = output_directory / "individual_valid"
indiv_test_path = output_directory / "individual_test"
os.makedirs(indiv_train_path, exist_ok=True)
os.makedirs(indiv_valid_path, exist_ok=True)
os.makedirs(indiv_test_path, exist_ok=True)
for domain, items in data["dev"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_train_path / f"{domain}.5-shot-train.jsonl"
dump(items, dump_path)
for domain, items in data["val"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_valid_path / f"{domain}.val.jsonl"
dump(items, dump_path)
for domain, items in data["test"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_test_path / f"{domain}.test.jsonl"
dump(items, dump_path)
combined_val = [item for _, items in data["val"].items() for item in items]
dump(combined_val, output_directory / f"combined_valid.jsonl")
combined_test = [item for _, items in data["test"].items() for item in items]
dump(combined_test, output_directory / f"combined_test.jsonl")
def make_five_shot_multitask_data(data, output_directory):
indiv_valid_path = output_directory / "individual_valid"
indiv_test_path = output_directory / "individual_test"
os.makedirs(indiv_valid_path, exist_ok=True)
os.makedirs(indiv_test_path, exist_ok=True)
for domain, items in data["val"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_valid_path / f"{domain}.val.jsonl"
dump(items, dump_path)
for domain, items in data["test"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_test_path / f"{domain}.test.jsonl"
dump(items, dump_path)
combined_train = [item for _, items in data["dev"].items() for item in items]
dump(combined_train, output_directory / f"train.jsonl")
combined_val = [item for _, items in data["val"].items() for item in items]
dump(combined_val, output_directory / f"combined_valid.jsonl")
combined_test = [item for _, items in data["test"].items() for item in items]
dump(combined_test, output_directory / f"combined_test.jsonl")
def make_full_transfer_data(data, output_directory):
indiv_valid_path = output_directory / "individual_valid"
indiv_test_path = output_directory / "individual_test"
os.makedirs(indiv_valid_path, exist_ok=True)
os.makedirs(indiv_test_path, exist_ok=True)
for domain, items in data["val"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_valid_path / f"{domain}.val.jsonl"
dump(items, dump_path)
for domain, items in data["test"].items():
domain = "_".join(domain.split("_")[:-1])
dump_path = indiv_test_path / f"{domain}.test.jsonl"
dump(items, dump_path)
combined_auxilary = [item for _, items in data["auxiliary_train"].items() for item in items]
random.seed(10)
random.shuffle(combined_auxilary)
auxillary_valid = combined_auxilary[-5000:]
auxiliary_train = combined_auxilary[:-5000]
dump(auxillary_valid, output_directory / f"auxillary_valid.jsonl")
combined_train = [item for _, items in data["dev"].items() for item in items]
full_train = auxiliary_train + combined_train
dump(full_train, output_directory / f"train.jsonl")
combined_val = [item for _, items in data["val"].items() for item in items]
dump(combined_val, output_directory / f"combined_valid.jsonl")
combined_test = [item for _, items in data["test"].items() for item in items]
dump(combined_test, output_directory / f"combined_test.jsonl")
def main(output_directory):
original_data_directory = maybe_download_data(output_directory)
all_data = parse_all_mmlu_data(original_data_directory)
make_five_shot_data(all_data, output_directory / "5-shot")
make_five_shot_multitask_data(all_data, output_directory / "5-shot-multitask")
make_full_transfer_data(all_data, output_directory / "full")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"""Downloads, parses and creates train, validation and test files for MMLU.
We consider 3 tasks:
* 5-shot: learn a model with 5 examples for each domain.
* 5-shot-multitask: Learn a single model using the combination of 5 examples from each domain.
* full: Learn a single model using training data from MMLU's auxialluary datasets, plus the training data from 5-shot-multitask.
In each case, overall test accuracy would be the micro average over each domains' test set (as defined by the orginal authors).
The script will download the data, and create the following directory structure:
βββ data.tar # original data
βββ 5-shot
βΒ Β βββ combined_test.jsonl
βΒ Β βββ combined_valid.jsonl
βΒ Β βββ individual_test
βΒ Β βΒ Β βββ {domain}.test.jsonl
βΒ Β βββ individual_train
βΒ Β βΒ Β βββ {domain}.5-shot-train.jsonl
βΒ Β βββ individual_valid
βΒ Β βββ {domain}.val.jsonl
βββ 5-shot-multitask
βΒ Β βββ combined_test.jsonl
βΒ Β βββ combined_valid.jsonl
βΒ Β βββ individual_test
βΒ Β βΒ Β βββ {domain}.test.jsonl
βΒ Β βββ individual_valid
βΒ Β βΒ Β βββ {domain}.val.jsonl
βΒ Β βββ train.jsonl
βββ full
βββ auxillary_valid.jsonl
βββ combined_test.jsonl
βββ combined_valid.jsonl
βββ individual_test
βΒ Β βββ {domain}.test.jsonl
βββ individual_valid
βΒ Β βββ {domain}.val.jsonl
βββ train.jsonl
* For 5-shot, train models 5-shot/individual_train/{domain}.5-shot-train.jsonl and test on 5-shot/individual_test/{domain}.test.jsonl
* For 5-shot-multitask, train models 5-shot-multitask/train.jsonl and test on 5-shot-multitask/combined_test.jsonl
* For the full data task, train models full/train.jsonl and test on full/combined_test.jsonl
"""
)
parser.add_argument(
"--output_directory",
type=str,
default="./data/",
help="Path to the file to which the dataset is written.",
)
args = parser.parse_args()
output_directory = Path(args.output_directory) / "data" / "mmlu_data"
main(output_directory)
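# Hedged usage note (not part of the original script): a typical invocation might be
#   python preprocessing/prepare_mmlu.py --output_directory ./data/
# which, given the path handling above, writes the 5-shot, 5-shot-multitask and full task
# files under ./data/data/mmlu_data/.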
|
atlas-main
|
preprocessing/prepare_mmlu.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from download_tools import get_download_path, get_s3_path, maybe_download_file
AVAILABLE_CORPORA = {
"corpora/wiki/enwiki-dec2017": {
"corpus": "corpora/wiki/enwiki-dec2017",
"description": "Wikipedia dump from Dec 2017, preprocessed into passages",
"files": ["text-list-100-sec.jsonl", "infobox.jsonl"],
},
"corpora/wiki/enwiki-dec2018": {
"corpus": "corpora/wiki/enwiki-dec2018",
"description": "Wikipedia dump from Dec 2018, preprocessed into passages",
"files": ["text-list-100-sec.jsonl", "infobox.jsonl"],
},
"corpora/wiki/enwiki-aug2019": {
"corpus": "corpora/wiki/enwiki-aug2019",
"description": "Wikipedia dump from Aug 2019, preprocessed into passages",
"files": ["text-list-100-sec.jsonl", "infobox.jsonl"],
},
"corpora/wiki/enwiki-dec2020": {
"corpus": "corpora/wiki/enwiki-dec2020",
"description": "Wikipedia dump from Dec 2020, preprocessed into passages",
"files": ["text-list-100-sec.jsonl", "infobox.jsonl"],
},
"corpora/wiki/enwiki-dec2021": {
"corpus": "corpora/wiki/enwiki-dec2021",
"description": "Wikipedia dump from Dec 2021, preprocessed into passages",
"files": ["text-list-100-sec.jsonl", "infobox.jsonl"],
},
}
def _helpstr():
helpstr = "The following corpora are available for download: "
for m in AVAILABLE_CORPORA.values():
helpstr += f'\nCorpus name: {m["corpus"]:<30} Description: {m["description"]}'
helpstr += "\ndownload by passing --corpus {corpus name}"
return helpstr
def main(output_directory, requested_corpus):
    AVAILABLE_CORPORA[requested_corpus]  # raises KeyError early if the requested corpus is unknown
for filename in AVAILABLE_CORPORA[requested_corpus]["files"]:
path = f"{requested_corpus}/{filename}"
source = get_s3_path(path)
target = get_download_path(output_directory, path)
maybe_download_file(source, target)
if __name__ == "__main__":
help_str = _helpstr()
choices = list(AVAILABLE_CORPORA.keys())
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--output_directory",
type=str,
default="./data",
help="Path to the file to which the dataset is written.",
)
parser.add_argument(
"--corpus",
type=str,
choices=choices,
help=help_str,
)
args = parser.parse_args()
main(args.output_directory, args.corpus)
|
atlas-main
|
preprocessing/download_corpus.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
logger = logging.getLogger(__name__)
class Options:
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialize_parser()
def initialize_parser(self):
# basic parameters
self.parser.add_argument(
"--name", type=str, default="experiment_name", help="name of the experiment - also used as directory name "
)
self.parser.add_argument(
"--checkpoint_dir",
type=str,
default="./checkpoint/",
help="models are saved here",
)
self.parser.add_argument(
"--model_path",
type=str,
default="none",
help="Path to a pretrained model to initialize from (pass 'none' to init from t5 and contriever)",
)
self.parser.add_argument(
"--per_gpu_batch_size",
default=1,
type=int,
help="Batch size per GPU/CPU for training.",
)
self.parser.add_argument(
"--per_gpu_embedder_batch_size",
default=512,
type=int,
help="Embedder's batch size per GPU.",
)
self.parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
self.parser.add_argument(
"--main_port",
type=int,
default=-1,
help="Main port (for multi-node jobs)",
)
self.parser.add_argument("--seed", type=int, default=0, help="random seed for initialization")
self.parser.add_argument(
"--log_freq",
type=int,
default=100,
help="log train stats <log_freq> steps during training",
)
self.parser.add_argument(
"--eval_freq",
type=int,
default=500,
help="evaluate model every <eval_freq> steps during training",
)
self.parser.add_argument(
"--save_freq",
type=int,
default=5000,
help="save model every <save_freq> steps during training",
)
self.parser.add_argument(
"--train_data", nargs="+", default=[], help="list of space-separated paths to jsonl-formatted train sets"
)
self.parser.add_argument(
"--eval_data",
nargs="+",
default=[],
help="list of space-separated paths to jsonl-formatted evaluation sets",
)
self.parser.add_argument("--write_results", action="store_true", help="save evaluation results to file")
self.parser.add_argument(
"--dont_write_passages",
action="store_true",
help="if writing results, passages can take up a lot of space, pass this flag not to write passages as part of dumped results",
)
def add_optim_options(self):
self.parser.add_argument("--warmup_steps", type=int, default=1000, help="number of learning rate warmup steps")
self.parser.add_argument("--total_steps", type=int, default=1000, help="total number of training steps")
self.parser.add_argument(
"--scheduler_steps",
type=int,
default=None,
help="total number of step for the scheduler, if None then scheduler_total_step = total_step",
)
self.parser.add_argument("--accumulation_steps", type=int, default=1, help="gradient accumulation")
self.parser.add_argument("--dropout", type=float, default=0.1, help="dropout rate")
self.parser.add_argument("--lr", type=float, default=1e-4, help="learning rate")
self.parser.add_argument("--lr_retriever", type=float, default=1e-5, help="learning rate for retriever")
self.parser.add_argument("--clip", type=float, default=1.0, help="gradient clipping")
self.parser.add_argument(
"--scheduler",
type=str,
default="cosine",
choices=["linear", "cosine", "fixed"],
help="learning rate schedule to use",
)
self.parser.add_argument(
"--weight_decay", type=float, default=0.1, help="amount of weight decay to apply in training"
)
self.parser.add_argument(
"--save_optimizer", action="store_true", help="Pass flag to save optimizer state in saved checkpoints"
)
self.parser.add_argument("--epsilon", type=float, default=1e-6, help="adamw epsilon value")
self.parser.add_argument("--alpha", type=float, default=1.0, help="adamw alpha value")
self.parser.add_argument("--beta2", type=float, default=0.999, help="adamw beta2 value")
self.parser.add_argument(
"--refresh_index",
type=str,
default="-1",
help="index refresh schedule. format: startstep-endstep:refreshrate,startstep-endstep:refreshrate "
"e.g. --refresh_index 0-100:10,100-1000000:500 will refresh the index every 10 steps for the first 100 steps, "
"and then every 500 steps from step 100 to 1M."
"Syntactic Sugar for a fixed schedule: can just pass in a single number e.g. --refresh_index 100 will refresh the index every 100 steps. "
"-1 to never refresh.",
)
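        # Hedged sketch (an assumption, not the repo's actual scheduler): one way the
        # schedule string documented above could be interpreted at a given training step.
        #   def _should_refresh(schedule: str, step: int) -> bool:
        #       if schedule == "-1":
        #           return False
        #       if ":" not in schedule:            # fixed-rate sugar, e.g. "100"
        #           return step % int(schedule) == 0
        #       for piece in schedule.split(","):  # e.g. "0-100:10,100-1000000:500"
        #           span, rate = piece.split(":")
        #           start, end = (int(x) for x in span.split("-"))
        #           if start <= step < end:
        #               return step % int(rate) == 0
        #       return False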
self.parser.add_argument("--shuffle", action="store_true", help="shuffle data for training")
# memory optimizations:
self.parser.add_argument(
"--precision",
type=str,
default="fp32",
choices=["fp16", "fp32", "bf16"],
help="numerical precision - recommend bf16 if available, fp16 likely to be unstable for training",
)
self.parser.add_argument(
"--shard_optim",
action="store_true",
help="train-time memory optimization: shards optimizer state over available GPUs using sharded data parallel, recommended for larger models",
)
self.parser.add_argument(
"--shard_grads",
action="store_true",
help="train-time memory optimization: shards gradients over available GPUs using sharded data parallel, recommended for larger models",
)
self.parser.add_argument(
"--use_gradient_checkpoint_reader",
action="store_true",
help="use gradient checkpointing in the reader",
)
self.parser.add_argument(
"--use_gradient_checkpoint_retriever",
action="store_true",
help="use gradient checkpointing for retriever",
)
def add_modeling_options(self):
self.parser.add_argument(
"--reader_model_type",
required=True,
type=str,
help="t5 Architecture for reader FID model, e.g. google/t5-xl-lm-adapt",
choices=[
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
"google/t5-v1_1-base",
"google/t5-v1_1-large",
"google/t5-v1_1-xl",
"google/t5-v1_1-xxl",
"google/t5-base-lm-adapt",
"google/t5-large-lm-adapt",
"google/t5-xl-lm-adapt",
"google/t5-xxl-lm-adapt",
],
)
self.parser.add_argument(
"--text_maxlength",
type=int,
default=200,
help="maximum number of tokens in input text segments (concatenated question+passage). Inputs longer than this will be truncated.",
)
self.parser.add_argument(
"--target_maxlength",
type=int,
default=None,
help="Maximum length of target outputs in tokens when training the model. Targets longer than this will be truncated. No truncation if -1",
)
self.parser.add_argument("--n_context", type=int, default=1, help="number of top k passages to pass to reader")
# Retriever modelling options
self.parser.add_argument(
"--passages",
nargs="+",
help="list of paths to jsonl files containing passages to index and retrieve from. Unused if loading a saved index using --load_index_path",
)
self.parser.add_argument(
"--max_passages",
type=int,
default=-1,
help="maximum number of passages to index. -1 to read all passages in passage files",
)
self.parser.add_argument(
"--retriever_model_path",
type=str,
default="facebook/contriever",
help="path to contriever model to init from (overridden if passing a value to --model_path ",
)
self.parser.add_argument(
"--retrieve_only",
action="store_true",
help="Pass this to prevent loading a reader, and only run retrieval evaluation",
)
self.parser.add_argument(
"--train_retriever", action="store_true", help="Pass to train retriever as well as reader"
)
self.parser.add_argument(
"--use_file_passages",
action="store_true",
help='uses passages in "passages" field in train or eval jsonl files rather than retrieving passages',
)
self.parser.add_argument(
"--retriever_n_context",
type=int,
default=5,
help="number of top k passages to use to train the retriever with",
)
self.parser.add_argument(
"--gold_score_mode",
type=str,
choices=["evalnormsum", "loop", "ppmean", "emdr", "pdist", "adist"],
default="ppmean",
help="retriever training method. `pdist` is the name used in the paper for `ppmean`. `adist` is the name used in the paper for `evalnormsum`",
)
self.parser.add_argument(
"--closed_book",
action="store_true",
help="Dont use retrieval - reduces to T5. Overrides n_context, n_context_retriever and encoder_format if they are set",
)
self.parser.add_argument(
"--temperature_score", type=float, default=0.01, help="softmax temperature for retriever"
)
self.parser.add_argument(
"--temperature_gold",
type=float,
default=0.01,
help="softmax temperature for target distribution for retriever distillation",
)
self.parser.add_argument("--compute_crossattention_stats", action="store_true")
self.parser.add_argument(
"--filtering_overretrieve_ratio",
type=int,
default=2,
help="if filtering, over-retrieve the topK by this factor, and then filter out undesirable results. Useful, Set to 1 only if using a task that doesn't filter retrieved results",
)
self.parser.add_argument("--freeze_retriever_steps", type=int, default=-1, help="freezes retriever for n steps")
self.parser.add_argument(
"--query_side_retriever_training",
action="store_true",
help="pass to enable query-side finetuning of retriever (unties the parameters of the contriever encoder's passage and query encoders, and freezes the passage encoder. Useful to avoid index refreshes.",
)
self.parser.add_argument(
"--retrieve_with_rerank",
action="store_true",
help="pass this to enable reranking with fresh passage encoder for retriever",
)
self.parser.add_argument(
"--n_to_rerank_with_retrieve_with_rerank",
type=int,
default=128,
help="n passages to rerank when passing --retrieve_with_rerank. Higher is slower but more accurate. Recommend 64-128",
)
# input and output formatting options:
self.parser.add_argument(
"--decoder_format", # TODO: decide whether to remove functionality
type=str,
default=None,
help="format for decoder, model will be train on the format and evaluation will be performed with the format contrary to the decoder_prompt_format option",
)
self.parser.add_argument( # TODO: decide whether to remove functionality
"--decoder_prompt_format",
type=str,
default=None,
help='format for decoder prompting, for instance "what is the answer to {query}:"',
)
self.parser.add_argument(
"--encoder_format",
type=str,
default="{query} title: {title} context: {text}",
help="format string for reader's encoder preprocessing",
)
self.parser.add_argument(
"--retriever_format",
type=str,
default="{title} {text}",
help="format string for retriever's encoder preprocessing",
)
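        # Illustrative note (an assumption about downstream use, not code from this file):
        # these templates are presumably filled with str.format, e.g.
        #   "{query} title: {title} context: {text}".format(
        #       query="who wrote hamlet?", title="Hamlet", text="Hamlet is a tragedy ..."
        #   )
        # produces the text fed to the reader's encoder for one retrieved passage.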
# Generation options
self.parser.add_argument("--generation_max_length", type=int, default=128)
self.parser.add_argument("--generation_min_length", type=int, default=None)
self.parser.add_argument("--generation_length_penalty", type=float, default=1.0)
self.parser.add_argument("--generation_num_beams", type=int, default=1)
# Task-specific options:
self.parser.add_argument(
"--task",
type=str,
default=None,
choices=["base", "mlm", "lm", "multiple_choice", "kilt", "section", "fever", "qa"],
help="Task performed by the model. Used to setup preprocessing, retrieval filtering, evaluations, etc.",
)
# MLM task options:
self.parser.add_argument(
"--mlm_noise_density",
type=float,
default=0.15,
help="how much of an input text should be masked by masking spans ",
)
self.parser.add_argument(
"--mlm_mean_noise_span_length", type=float, default=3, help="average length of an MLM masking span"
)
self.parser.add_argument(
"--min_words_per_lm_instance",
type=int,
default=None,
help="Instances with fewer than min_words_per_lm_instance instances will be skipped for MLM/LM/Section Generation",
)
# LM task options:
        self.parser.add_argument(
            "--min_lm_context_ratio",
            type=float,
            default=0.5,
            help="Splits text into two segments for language modelling. "
            "The left segment is the conditioning context, the right segment is generated. "
            "The left segment must be more than min_lm_context_ratio of the right segment",
        )
        self.parser.add_argument(
            "--max_lm_context_ratio",
            type=float,
            default=0.5,
            help="Splits text into two segments for language modelling. "
            "The left segment is the conditioning context, the right segment is generated. "
            "The left segment must be less than max_lm_context_ratio of the right segment",
        )
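        # Hedged illustration (an assumption about downstream use): for the LM task a
        # document is split into (context, continuation) at a point bounded by the two
        # ratios above; e.g. with min/max ratios of 0.25/0.75 and a 100-word document,
        # the split index falls somewhere in [25, 75]; the left part conditions the model
        # and the right part is the generation target.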
# Open-domain task options:
self.parser.add_argument(
"--qa_prompt_format",
type=str,
default="question: {question} answer: <extra_id_0>",
help="How to format question as input prompts when using --task qa",
)
# Multiple Choice task options:
self.parser.add_argument(
"--multiple_choice_num_options",
type=int,
default=4,
help="How many choice options for multiple choice QA (MMLU is 4)",
)
self.parser.add_argument(
"--multiple_choice_train_permutations",
choices=["single", "cyclic", "all"],
default="single",
type=str,
help="Whether to train with answer order permutations When training on multiple choice (e.g. MMLU)."
" Can improve results by de-biasing models's preferences for arbitrary answer orderings. Recommend training with 'all'. "
"single: no permutations. cyclic: cyclic permutations. all: all possible answer order permutations'",
)
self.parser.add_argument(
"--multiple_choice_eval_permutations",
choices=["single", "cyclic", "all"],
default="single",
type=str,
help="Whether to evaluate with answer order permutations for multiple choice (e.g. MMLU)."
" Can improve results by de-biasing models's preferences for arbitrary answer orderings. Best results with 'all' but very slow. 'cyclic' is a good compromise. "
"single: no permutations. cyclic: cyclic permutations. all: all possible answer order permutations'",
)
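        # Hedged sketch (not the repo's implementation): what the permutation modes mean for
        # hypothetical options ["A", "B", "C", "D"]:
        #   single -> [("A", "B", "C", "D")]
        #   cyclic -> [("A","B","C","D"), ("B","C","D","A"), ("C","D","A","B"), ("D","A","B","C")]
        #   all    -> itertools.permutations(options), i.e. 24 orderings for 4 options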
def add_index_options(self):
self.parser.add_argument(
"--load_index_path",
default=None,
type=str,
help="path for loading the index, passage embeddings and passages",
)
self.parser.add_argument(
"--save_index_path",
default=None,
type=str,
help="path for saving the index and/or embeddings",
)
self.parser.add_argument(
"--save_index_n_shards",
default=128,
type=int,
help="how many shards to save an index to file with. Must be an integer multiple of the number of workers.",
)
self.parser.add_argument(
"--index_mode",
type=str,
default="flat",
help="Use flat torch index or a faiss index for retrieving the k nearest neighbors",
choices=["flat", "faiss"],
)
# faiss options:
self.parser.add_argument(
"--faiss_index_type",
type=str,
default="flat",
help="IVFFlat, IndexFlatIP, IVFScalarQuantizer or IndexIVFPQ with faiss-gpu",
choices=["ivfflat", "flat", "ivfsq", "ivfpq", "pq"],
)
self.parser.add_argument("--faiss_code_size", type=int, default=None, help="Parameter for PQ/SQ quantization")
def print_options(self, opt):
message = "\n"
for k, v in sorted(vars(opt).items()):
comment = ""
default_value = self.parser.get_default(k)
if v != default_value:
comment = f"\t(default: {default_value})"
message += f"{k:>30}: {str(v):<40}{comment}\n"
expr_dir = Path(opt.checkpoint_dir) / opt.name
with open(expr_dir / "opt.log", "wt") as opt_file:
opt_file.write(message)
opt_file.write("\n")
logger.info(message)
def parse(self):
opt = self.parser.parse_args()
if opt.closed_book: # override flags to enable closed book mode
opt.n_context = 1
opt.retriever_n_context = 1
opt.encoder_format = "{query}"
opt.use_file_passages = True
if opt.gold_score_mode == "pdist": # allow paper name of retriever losses
opt.gold_score_mode = "ppmean"
if opt.gold_score_mode == "adist": # allow paper name of retriever losses
opt.gold_score_mode = "evalnormsum"
if (
opt.use_file_passages
        ):  # if passing use_file_passages, the following should be false (there is no retriever loaded in this case)
opt.train_retriever = False
opt.query_side_retriever_training = False
opt.use_gradient_checkpoint_retriever = False
return opt
def get_options():
options = Options()
options.add_index_options()
options.add_modeling_options()
options.add_optim_options()
return options
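# Hedged usage sketch (hypothetical call site, not code from this file): an entry point
# would presumably do something like
#   options = get_options()
#   opt = options.parse()
#   options.print_options(opt)   # assumes checkpoint_dir/name already exists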
|
atlas-main
|
src/options.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import os
import pickle
from typing import Optional, Set, Tuple, Union, Any
import faiss
import faiss.contrib.torch_utils
import numpy as np
import torch
from src import dist_utils
from src.retrievers import EMBEDDINGS_DIM
FAISSGPUIndex = Union[
faiss.GpuIndexIVFFlat, faiss.GpuIndexIVFPQ, faiss.GpuIndexIVFScalarQuantizer, faiss.GpuIndexFlatIP
]
FAISSIndex = Union[FAISSGPUIndex, faiss.IndexPQ]
GPUIndexConfig = Union[
faiss.GpuIndexIVFPQConfig,
faiss.GpuIndexIVFFlatConfig,
faiss.GpuIndexIVFScalarQuantizerConfig,
faiss.GpuIndexFlatConfig,
]
BITS_PER_CODE: int = 8
CHUNK_SPLIT: int = 3
def serialize_listdocs(ids):
ids = pickle.dumps(ids)
ids = torch.tensor(list(ids), dtype=torch.uint8).cuda()
return ids
def deserialize_listdocs(ids):
return [pickle.loads(x.cpu().numpy().tobytes()) for x in ids]
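# Illustrative round-trip (not in the original file; needs a CUDA device because of .cuda() above):
#   t = serialize_listdocs([{"id": 0, "text": "hello"}])   # pickled bytes as a uint8 CUDA tensor
#   deserialize_listdocs([t])                              # -> [[{"id": 0, "text": "hello"}]]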
class DistributedIndex(object):
def __init__(self):
self.embeddings = None
self.doc_map = dict()
self.is_in_gpu = True
def init_embeddings(self, passages, dim: Optional[int] = EMBEDDINGS_DIM):
self.doc_map = {i: doc for i, doc in enumerate(passages)}
        self.embeddings = torch.zeros(dim, len(passages), dtype=torch.float16)
if self.is_in_gpu:
self.embeddings = self.embeddings.cuda()
def _get_saved_embedding_path(self, save_dir: str, shard: int) -> str:
return os.path.join(save_dir, f"embeddings.{shard}.pt")
def _get_saved_passages_path(self, save_dir: str, shard: int) -> str:
return os.path.join(save_dir, f"passages.{shard}.pt")
def save_index(self, path: str, total_saved_shards: int, overwrite_saved_passages: bool = False) -> None:
"""
Saves index state to disk, which can later be loaded by the load_index method.
Specifically, it saves the embeddings and passages into total_saved_shards separate file shards.
        This option enables loading the index in another session with a different number of workers, as long as total_saved_shards is divisible by the number of workers.
Note that the embeddings will always be saved to disk (it will overwrite any embeddings previously saved there).
The passages will only be saved to disk if they have not already been written to the save directory before, unless the option --overwrite_saved_passages is passed.
"""
assert self.embeddings is not None
rank = dist_utils.get_rank()
ws = dist_utils.get_world_size()
        assert total_saved_shards % ws == 0, "total_saved_shards must be a multiple of the number of workers"
shards_per_worker = total_saved_shards // ws
n_embeddings = self.embeddings.shape[1]
embeddings_per_shard = math.ceil(n_embeddings / shards_per_worker)
assert n_embeddings == len(self.doc_map), len(self.doc_map)
for shard_ind, (shard_start) in enumerate(range(0, n_embeddings, embeddings_per_shard)):
shard_end = min(shard_start + embeddings_per_shard, n_embeddings)
shard_id = shard_ind + rank * shards_per_worker # get global shard number
passage_shard_path = self._get_saved_passages_path(path, shard_id)
if not os.path.exists(passage_shard_path) or overwrite_saved_passages:
passage_shard = [self.doc_map[i] for i in range(shard_start, shard_end)]
with open(passage_shard_path, "wb") as fobj:
pickle.dump(passage_shard, fobj, protocol=pickle.HIGHEST_PROTOCOL)
embeddings_shard = self.embeddings[:, shard_start:shard_end]
embedding_shard_path = self._get_saved_embedding_path(path, shard_id)
torch.save(embeddings_shard, embedding_shard_path)
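    # Illustrative note (not in the original file): with 8 workers and total_saved_shards=128,
    # each worker writes 128 // 8 = 16 shard files; reloading later requires a world size that
    # divides 128 (e.g. 4, 8, 16, ...), matching the docstring above.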
def load_index(self, path: str, total_saved_shards: int):
"""
Loads sharded embeddings and passages files (no index is loaded).
"""
rank = dist_utils.get_rank()
ws = dist_utils.get_world_size()
        assert total_saved_shards % ws == 0, "total_saved_shards must be a multiple of the number of workers"
shards_per_worker = total_saved_shards // ws
passages = []
embeddings = []
for shard_id in range(rank * shards_per_worker, (rank + 1) * shards_per_worker):
passage_shard_path = self._get_saved_passages_path(path, shard_id)
with open(passage_shard_path, "rb") as fobj:
passages.append(pickle.load(fobj))
embeddings_shard_path = self._get_saved_embedding_path(path, shard_id)
embeddings.append(torch.load(embeddings_shard_path, map_location="cpu").cuda())
self.doc_map = {}
n_passages = 0
for chunk in passages:
for p in chunk:
self.doc_map[n_passages] = p
n_passages += 1
self.embeddings = torch.concat(embeddings, dim=1)
def _compute_scores_and_indices(self, allqueries: torch.tensor, topk: int) -> Tuple[torch.tensor, torch.tensor]:
"""
Computes the distance matrix for the query embeddings and embeddings chunk and returns the k-nearest neighbours and corresponding scores.
"""
scores = torch.matmul(allqueries.half(), self.embeddings)
scores, indices = torch.topk(scores, topk, dim=1)
return scores, indices
@torch.no_grad()
def search_knn(self, queries, topk):
"""
Conducts exhaustive search of the k-nearest neighbours using the inner product metric.
"""
allqueries = dist_utils.varsize_all_gather(queries)
allsizes = dist_utils.get_varsize(queries)
allsizes = np.cumsum([0] + allsizes.cpu().tolist())
# compute scores for the part of the index located on each process
scores, indices = self._compute_scores_and_indices(allqueries, topk)
indices = indices.tolist()
docs = [[self.doc_map[x] for x in sample_indices] for sample_indices in indices]
if torch.distributed.is_initialized():
docs = [docs[allsizes[k] : allsizes[k + 1]] for k in range(len(allsizes) - 1)]
docs = [serialize_listdocs(x) for x in docs]
scores = [scores[allsizes[k] : allsizes[k + 1]] for k in range(len(allsizes) - 1)]
gather_docs = [dist_utils.varsize_gather(docs[k], dst=k, dim=0) for k in range(dist_utils.get_world_size())]
gather_scores = [
dist_utils.varsize_gather(scores[k], dst=k, dim=1) for k in range(dist_utils.get_world_size())
]
rank_scores = gather_scores[dist_utils.get_rank()]
rank_docs = gather_docs[dist_utils.get_rank()]
scores = torch.cat(rank_scores, dim=1)
rank_docs = deserialize_listdocs(rank_docs)
merge_docs = [[] for _ in range(queries.size(0))]
for docs in rank_docs:
for k, x in enumerate(docs):
merge_docs[k].extend(x)
docs = merge_docs
_, subindices = torch.topk(scores, topk, dim=1)
scores = scores.tolist()
subindices = subindices.tolist()
# Extract topk scores and associated ids
scores = [[scores[k][j] for j in idx] for k, idx in enumerate(subindices)]
docs = [[docs[k][j] for j in idx] for k, idx in enumerate(subindices)]
return docs, scores
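    # Hedged single-process sketch (not part of the class): the core of the exhaustive
    # inner-product search above reduces to a matmul plus top-k, e.g.
    #   queries = torch.randn(2, EMBEDDINGS_DIM).half().cuda()   # hypothetical query batch
    #   scores = queries @ self.embeddings                       # (n_queries, n_passages)
    #   scores, indices = torch.topk(scores, k=5, dim=1)
    # the distributed path additionally all-gathers queries and merges per-shard results.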
def is_index_trained(self) -> bool:
return True
class DistributedFAISSIndex(DistributedIndex):
def __init__(self, index_type: str, code_size: Optional[int] = None):
super().__init__()
self.embeddings = None
self.doc_map = dict()
self.faiss_gpu_index = None
self.gpu_resources = None
self.faiss_index_trained = False
self.faiss_index_type = index_type
self.code_size = code_size
self.is_in_gpu = False
def _get_faiss_index_filename(self, save_index_path: str) -> str:
"""
Creates the filename to save the trained index to using the index type, code size (if not None) and rank.
"""
rank = dist_utils.get_rank()
if self.code_size:
return save_index_path + f"/index{self.faiss_index_type}_{str(self.code_size)}_rank_{rank}.faiss"
return save_index_path + f"/index{self.faiss_index_type}_rank_{rank}.faiss"
def _add_embeddings_to_gpu_index(self) -> None:
"""
        Adds the embeddings to the GPU index, if they have not already been added.
"""
assert self.faiss_gpu_index is not None, "The FAISS GPU index was not correctly instantiated."
        assert self.faiss_gpu_index.is_trained, "The FAISS index has not been trained."
if self.faiss_gpu_index.ntotal == 0:
self._add_embeddings_by_chunks()
def _add_embeddings_by_chunks(self) -> None:
_, num_points = self.embeddings.shape
chunk_size = num_points // CHUNK_SPLIT
split_embeddings = [
self.embeddings[:, 0:chunk_size],
self.embeddings[:, chunk_size : 2 * chunk_size],
self.embeddings[:, 2 * chunk_size : num_points],
]
for embeddings_chunk in split_embeddings:
if isinstance(self.faiss_gpu_index, FAISSGPUIndex.__args__):
self.faiss_gpu_index.add(self._cast_to_torch32(embeddings_chunk.T))
else:
self.faiss_gpu_index.add(self._cast_to_numpy(embeddings_chunk.T))
def _compute_scores_and_indices(self, allqueries: torch.tensor, topk: int) -> Tuple[torch.tensor, torch.tensor]:
"""
Computes the distance matrix for the query embeddings and embeddings chunk and returns the k-nearest neighbours and corresponding scores.
"""
_, num_points = self.embeddings.shape
self.faiss_gpu_index.nprobe = math.floor(math.sqrt(num_points))
self._add_embeddings_to_gpu_index()
if isinstance(self.faiss_gpu_index, FAISSGPUIndex.__args__):
scores, indices = self.faiss_gpu_index.search(self._cast_to_torch32(allqueries), topk)
else:
np_scores, indices = self.faiss_gpu_index.search(self._cast_to_numpy(allqueries), topk)
scores = torch.from_numpy(np_scores).cuda()
return scores.half(), indices
def save_index(self, save_index_path: str, save_index_n_shards: int) -> None:
"""
        Saves the embeddings and passages, and, if a FAISS index exists, saves it as well.
"""
super().save_index(save_index_path, save_index_n_shards)
self._save_faiss_index(save_index_path)
def _save_faiss_index(self, path: str) -> None:
"""
Moves the GPU FAISS index to CPU and saves it to a .faiss file.
"""
index_path = self._get_faiss_index_filename(path)
assert self.faiss_gpu_index is not None, "There is no FAISS index to save."
cpu_index = faiss.index_gpu_to_cpu(self.faiss_gpu_index)
faiss.write_index(cpu_index, index_path)
def _load_faiss_index(self, load_index_path: str) -> None:
"""
Loads a FAISS index and moves it to the GPU.
"""
faiss_cpu_index = faiss.read_index(load_index_path)
# move to GPU
self._move_index_to_gpu(faiss_cpu_index)
def load_index(self, path: str, total_saved_shards: int) -> None:
"""
Loads passage embeddings and passages and a faiss index (if it exists).
Otherwise, it initialises and trains the index in the GPU with GPU FAISS.
"""
super().load_index(path, total_saved_shards)
load_index_path = self._get_faiss_index_filename(path)
if os.path.exists(load_index_path):
self._load_faiss_index(load_index_path)
else:
self.train_index()
def is_index_trained(self) -> bool:
if self.faiss_gpu_index is None:
return self.faiss_index_trained
return not self.faiss_gpu_index.is_trained
def _initialise_index(self) -> None:
"""
Initialises the index in the GPU with GPU FAISS.
Supported gpu index types: IVFFlat, IndexFlatIP, IndexIVFPQ, IVFSQ.
"""
dimension, num_points = self.embeddings.shape
# @TODO: Add support to set the n_list and n_probe parameters.
n_list = math.floor(math.sqrt(num_points))
self.faiss_gpu_index = self.gpu_index_factory(dimension, n_list)
@torch.no_grad()
def _set_gpu_options(self) -> faiss.GpuMultipleClonerOptions:
"""
        Returns the GPU cloner options necessary when moving a CPU index to the GPU.
"""
cloner_opts = faiss.GpuClonerOptions()
cloner_opts.useFloat16 = True
cloner_opts.usePrecomputed = False
cloner_opts.indicesOptions = faiss.INDICES_32_BIT
return cloner_opts
@torch.no_grad()
def _set_index_config_options(self, index_config: GPUIndexConfig) -> GPUIndexConfig:
"""
Returns the GPU config options for GPU indexes.
"""
index_config.device = torch.cuda.current_device()
index_config.indicesOptions = faiss.INDICES_32_BIT
index_config.useFloat16 = True
return index_config
def _create_PQ_index(self, dimension) -> FAISSIndex:
"""
GPU config options for PQ index
"""
cpu_index = faiss.index_factory(dimension, "PQ" + str(self.code_size), faiss.METRIC_INNER_PRODUCT)
cfg = self._set_gpu_options()
return faiss.index_cpu_to_gpu(self.gpu_resources, self.embeddings.get_device(), cpu_index, cfg)
@torch.no_grad()
def gpu_index_factory(self, dimension: int, n_list: Optional[int] = None) -> FAISSIndex:
"""
Instantiates and returns the selected GPU index class.
"""
self.gpu_resources = faiss.StandardGpuResources()
if self.faiss_index_type == "ivfflat":
config = self._set_index_config_options(faiss.GpuIndexIVFFlatConfig())
return faiss.GpuIndexIVFFlat(
self.gpu_resources,
dimension,
n_list,
faiss.METRIC_INNER_PRODUCT,
config,
)
elif self.faiss_index_type == "flat":
config = self._set_index_config_options(faiss.GpuIndexFlatConfig())
return faiss.GpuIndexFlatIP(self.gpu_resources, dimension, config)
elif self.faiss_index_type == "pq":
return self._create_PQ_index(dimension)
elif self.faiss_index_type == "ivfpq":
config = self._set_index_config_options(faiss.GpuIndexIVFPQConfig())
return faiss.GpuIndexIVFPQ(
self.gpu_resources,
dimension,
n_list,
self.code_size,
BITS_PER_CODE,
faiss.METRIC_INNER_PRODUCT,
config,
)
elif self.faiss_index_type == "ivfsq":
config = self._set_index_config_options(faiss.GpuIndexIVFScalarQuantizerConfig())
qtype = faiss.ScalarQuantizer.QT_4bit
return faiss.GpuIndexIVFScalarQuantizer(
self.gpu_resources,
dimension,
n_list,
qtype,
faiss.METRIC_INNER_PRODUCT,
True,
config,
)
else:
raise ValueError("unsupported index type")
@torch.no_grad()
def train_index(self) -> None:
"""
It initialises the index and trains it according to the refresh index schedule.
"""
if self.faiss_gpu_index is None:
self._initialise_index()
self.faiss_gpu_index.reset()
if isinstance(self.faiss_gpu_index, FAISSGPUIndex.__args__):
self.faiss_gpu_index.train(self._cast_to_torch32(self.embeddings.T))
else:
self.faiss_gpu_index.train(self._cast_to_numpy(self.embeddings.T))
@torch.no_grad()
def _cast_to_torch32(self, embeddings: torch.tensor) -> torch.tensor:
"""
Converts a torch tensor to a contiguous float 32 torch tensor.
"""
return embeddings.type(torch.float32).contiguous()
@torch.no_grad()
def _cast_to_numpy(self, embeddings: torch.tensor) -> np.ndarray:
"""
Converts a torch tensor to a contiguous numpy float 32 ndarray.
"""
return embeddings.cpu().to(dtype=torch.float16).numpy().astype("float32").copy(order="C")
@torch.no_grad()
def _move_index_to_gpu(self, cpu_index: FAISSIndex) -> None:
"""
Moves a loaded index to GPU.
"""
self.gpu_resources = faiss.StandardGpuResources()
cfg = self._set_gpu_options()
self.faiss_gpu_index = faiss.index_cpu_to_gpu(self.gpu_resources, torch.cuda.current_device(), cpu_index, cfg)
|
atlas-main
|
src/index.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import os
import signal
import socket
import subprocess
import sys
from logging import getLogger
import torch
logger = getLogger()
GLOO_GROUP = None
def sig_handler(signum, frame):
logger.warning("Signal handler called with signal " + str(signum))
prod_id = int(os.environ["SLURM_PROCID"])
logger.warning("Host: %s - Global rank: %i" % (socket.gethostname(), prod_id))
if prod_id == 0:
logger.warning("Requeuing job " + os.environ["SLURM_JOB_ID"])
os.system("scontrol requeue " + os.environ["SLURM_JOB_ID"])
else:
logger.warning("Not the main process, no need to requeue.")
sys.exit(-1)
def term_handler(signum, frame):
logger.warning("Signal handler called with signal " + str(signum))
logger.warning("Bypassing SIGTERM.")
def init_signal_handler():
"""
Handle signals sent by SLURM for time limit / pre-emption.
"""
signal.signal(signal.SIGUSR1, sig_handler)
signal.signal(signal.SIGTERM, term_handler)
# logger.warning("Signal handler installed.")
def init_distributed_mode(params):
"""
Handle single and multi-GPU / multi-node / SLURM jobs.
Initialize the following variables:
- n_nodes
- node_id
- local_rank
- global_rank
- world_size
"""
# params.is_slurm_job = 'SLURM_JOB_ID' in os.environ
params.is_slurm_job = "SLURM_JOB_ID" in os.environ and not "WORLD_SIZE" in os.environ
has_local_rank = hasattr(params, "local_rank")
# SLURM job
if params.is_slurm_job and has_local_rank:
assert params.local_rank == -1 # on the cluster, this is handled by SLURM
SLURM_VARIABLES = [
"SLURM_JOB_ID",
"SLURM_JOB_NODELIST",
"SLURM_JOB_NUM_NODES",
"SLURM_NTASKS",
"SLURM_TASKS_PER_NODE",
"SLURM_MEM_PER_NODE",
"SLURM_MEM_PER_CPU",
"SLURM_NODEID",
"SLURM_PROCID",
"SLURM_LOCALID",
"SLURM_TASK_PID",
]
PREFIX = "%i - " % int(os.environ["SLURM_PROCID"])
for name in SLURM_VARIABLES:
value = os.environ.get(name, None)
# print(PREFIX + "%s: %s" % (name, str(value)))
# # job ID
# params.job_id = os.environ['SLURM_JOB_ID']
# number of nodes / node ID
params.n_nodes = int(os.environ["SLURM_JOB_NUM_NODES"])
params.node_id = int(os.environ["SLURM_NODEID"])
# local rank on the current node / global rank
params.local_rank = int(os.environ["SLURM_LOCALID"])
params.global_rank = int(os.environ["SLURM_PROCID"])
# number of processes / GPUs per node
params.world_size = int(os.environ["SLURM_NTASKS"])
params.n_gpu_per_node = params.world_size // params.n_nodes
# define master address and master port
hostnames = subprocess.check_output(["scontrol", "show", "hostnames", os.environ["SLURM_JOB_NODELIST"]])
params.main_addr = hostnames.split()[0].decode("utf-8")
assert 10001 <= params.main_port <= 20000 or params.world_size == 1
# print(PREFIX + "Master address: %s" % params.master_addr)
# print(PREFIX + "Master port : %i" % params.master_port)
# set environment variables for 'env://'
os.environ["MASTER_ADDR"] = params.main_addr
os.environ["MASTER_PORT"] = str(params.main_port)
os.environ["WORLD_SIZE"] = str(params.world_size)
os.environ["RANK"] = str(params.global_rank)
params.is_distributed = True
# multi-GPU job (local or multi-node) - jobs started with torch.distributed.launch
elif has_local_rank and params.local_rank != -1:
assert params.main_port == -1
# read environment variables
params.global_rank = int(os.environ["RANK"])
params.world_size = int(os.environ["WORLD_SIZE"])
params.n_gpu_per_node = int(os.environ["NGPU"])
# number of nodes / node ID
params.n_nodes = params.world_size // params.n_gpu_per_node
params.node_id = params.global_rank // params.n_gpu_per_node
params.is_distributed = True
else:
params.local_rank = 0
params.global_rank = 0
params.world_size = 1
params.is_distributed = False
params.n_nodes = 1
params.node_id = 0
params.n_gpu_per_node = 1
# define whether this is the master process / if we are in distributed mode
params.is_main = params.node_id == 0 and params.local_rank == 0
params.multi_node = params.n_nodes > 1
params.multi_gpu = params.world_size > 1
# summary
PREFIX = "%i - " % params.global_rank
# set GPU device
if params.is_distributed:
torch.cuda.set_device(params.local_rank)
device = torch.device("cuda", params.local_rank)
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
params.device = device
# initialize multi-GPU
if params.is_distributed:
# http://pytorch.apachecn.org/en/0.3.0/distributed.html#environment-variable-initialization
# 'env://' will read these environment variables:
# MASTER_PORT - required; has to be a free port on machine with rank 0
# MASTER_ADDR - required (except for rank 0); address of rank 0 node
# WORLD_SIZE - required; can be set either here, or in a call to init function
# RANK - required; can be set either here, or in a call to init function
# print("Initializing PyTorch distributed ...")
# Fix for if gloo sockets are inconsistent
p1 = subprocess.Popen(["ip", "r"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "default"], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
gloo_socket_ifname = subprocess.check_output(["awk", "{print $5}"], stdin=p2.stdout).decode("utf-8").strip()
p2.stdout.close()
os.environ["GLOO_SOCKET_IFNAME"] = gloo_socket_ifname
torch.distributed.init_process_group(
init_method="env://",
backend="nccl",
)
global GLOO_GROUP
GLOO_GROUP = torch.distributed.new_group(
list(range(params.world_size)), backend="gloo", timeout=datetime.timedelta(0, 600)
)
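# Hedged illustration (not part of the original file): for the non-SLURM branch above, a
# hypothetical single-node, 2-GPU launch would need the environment variables read here, e.g.
#   NGPU=2 python -m torch.distributed.launch --nproc_per_node=2 <entry_script> ...
# so that RANK, WORLD_SIZE, NGPU and the MASTER_* variables are set before
# init_distributed_mode(params) runs.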
def get_gloo_group():
global GLOO_GROUP
assert GLOO_GROUP is not None
return GLOO_GROUP
|
atlas-main
|
src/slurm.py
|