python_code
stringlengths 0
4.04M
| repo_name
stringlengths 8
58
| file_path
stringlengths 5
147
|
---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
maps = """MIR # 67.40 # QA_mir_lr=3e-5_ep=10_rs=32_rf=3_mcs=256_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
CFT # 61.58 # QA_simplecl_lr=3e-5_ep=10_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
ER # 66.62 # QA_er_lr=3e-5_ep=10_rs=32_rf=3_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
MaxLoss# 66.55 # QA_mir_lr=3e-5_ep=10_rs=32_rf=3_mcs=256_largest_afterloss_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
OnL2Reg # 65.09 # qa_simplecl_lr=3e-5_ep=10_l2w=1_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
OnEWC # 65.31 # qa_oewc_lr=3e-5_ep=10_lbd=250_gm=9e-1_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
*Frozen # 45.77 # QA_nonecl_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
"""
"""
*Frozen # 45.77 # QA_nonecl_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
"""
import os
import json
import pandas as pd
import altair as alt
from cmr.notebooks.draw_utils import draw_stacked_bars, draw_curve
# os.chdir("experiments/results/qa/")
# Parse each "name # OEC-score # result-file" row of `maps`, load that
# method's per-timestep online evaluation results, tag every record with
# the method name, and pool everything into one long-format table.
all_data = []
for line in maps.splitlines():
    name, OECT, path = [i.strip() for i in line.split("#")]
    # print(name, OECT, path)
    o = json.load(open("experiments/results/qa_backup_1113/" + path))
    # debugger_args = eval(o["debugger_args"])
    # data_args = eval(o["data_args"])
    r = o["online_eval_results"]
    for item in r:
        item["prefix"] = name
        if item["timecode"] == 99:
            # Shift the final timecode 99 -> 100 so the axis ends at T=100.
            item["timecode"] += 1
    all_data += r
    # print(o)
    # EFRs = [item["EFR"] for item in online]
    # UKRs = [item["UKR"] for item in online if "UKR" in item]
    # OKRs = [item["OKR"] for item in online if "OKR" in item]
    # KGs = [item["KG"] for item in online if "KG" in item]
    # CSRs = [item["CSR"] for item in online if "CSR" in item]
# for item in all_data:
#     if item["name"] == "*Frozne":
#         item["OKR"] = 0
#     else:
#         if item["OKR"]
all_data = pd.DataFrame(all_data)
# all_data = all_data.drop(columns=["before_eval_results", "before_error_ids", "mir_buffer_ids", "retrieved_ids", "OKR_sampled_ids"])
def flatten_list(lst):
    """Concatenate a sequence of lists into a single flat list.

    Args:
        lst: an iterable whose elements are themselves iterable
            (e.g. a pandas Series of lists).

    Returns:
        One list containing all inner elements, in order.
    """
    # A nested comprehension replaces the manual `flst += l` loop:
    # clearer, and avoids repeated list re-extension.
    return [element for sub in lst for element in sub]
def flatten_predictions(before_eval_results):
    """Collect the `predictions` lists from a sequence of eval-result dicts.

    Returns:
        (all_predictions, scores) — `scores` is currently always empty
        because per-example score collection is disabled.
    """
    all_predictions = []
    scores = []
    for result in list(before_eval_results):
        all_predictions.extend(result["predictions"])
        # scores += result["metric_results"]["EM"]  # disabled
    return all_predictions, scores
def jaccard(list1, list2):
    """Jaccard similarity |A ∩ B| / |A ∪ B| of two iterables, as sets.

    Args:
        list1, list2: iterables of hashable items (duplicates ignored).

    Returns:
        Similarity in [0, 1]. Two empty inputs return 1.0 (identical
        sets) instead of raising ZeroDivisionError as the original did.
    """
    set1, set2 = set(list1), set(list2)
    union = len(set1 | set2)
    if union == 0:
        return 1.0
    return len(set1 & set2) / union
def error_sim(all_data, span=[0, 10], methods=["CFT", "OnL2Reg", "OnEWC", "ER", "MaxLoss", "MIR"]):
    """Plot a heatmap of pairwise prediction disagreement between methods.

    For every ordered pair of methods, flattens their per-episode
    predictions within the inclusive `span` timecode window and computes
    the fraction of positions where the two predictions differ (despite
    the name, `sim` is a disagreement rate). Saves the heatmap to
    figures/heatmaps/<start>-<end>.png.

    NOTE(review): mutable default arguments; safe here because neither
    default is mutated.
    """
    df = all_data[(all_data.timecode<=span[1]) & (all_data.timecode>=span[0])]
    sims = []
    for method_1 in methods:
        for method_2 in methods:
            if method_1 == method_2:
                continue
            # errors1 = flatten_list(df[df.prefix==method_1]["before_error_ids"])
            # errors2 = flatten_list(df[df.prefix==method_2]["before_error_ids"])
            errors1, scores1 = flatten_predictions(df[df.prefix==method_1]["before_eval_results"])
            errors2, scores2 = flatten_predictions(df[df.prefix==method_2]["before_eval_results"])
            # if len(errors1) == 0:
            #     continue
            # All methods answered the same stream, so lengths must match.
            assert len(errors1) == len(errors2)
            sim = sum([p1!=p2 for p1, p2 in zip(errors1, errors2)])/len(errors1)
            # sim = jaccard(errors1, errors2)
            sims.append({"method1": method_1, "method2": method_2, "sim": sim})
            print(f"{method_1}-{method_2}: {sim}")
    sims = pd.DataFrame(sims)
    # Fixed color domain [0.35, 0.45] keeps the scale comparable across spans.
    fig = alt.Chart(sims).mark_rect().encode(
        x=alt.X('method1:O', sort=methods),
        y=alt.Y('method2:O', sort=methods),
        # color='sim:Q'
        color = alt.Color('sim:Q',scale=alt.Scale(domain=[0.35, 0.45]))
    )
    fig = fig.properties(width=500, height=500).configure_title(fontSize=0,
    ).configure_axis(
        labelFontSize=30,
        titleFontSize=0,
    )
    fig.save(f"figures/heatmaps/{span[0]}-{span[1]}.png", scale=5.0)
# Render disagreement heatmaps over several windows of the stream:
# consecutive 10-episode windows plus wider 20-episode spans.
error_sim(all_data, span=[0,10])
error_sim(all_data, span=[10,20])
error_sim(all_data, span=[20,30])
error_sim(all_data, span=[30,40])
error_sim(all_data, span=[50,60])
error_sim(all_data, span=[90,100])
error_sim(all_data, span=[0,20])
error_sim(all_data, span=[40,60])
error_sim(all_data, span=[80,100])
# all_data = all_data.dropna()
# all_data['OEC'] = all_data.drop(columns=["timecode", "EFR", "SR", "Overall"]).mean(numeric_only=True, axis=1)
# print(all_data)
# exit()
# print(all_data)
# fig = draw_curve(df=all_data[all_data["EFR"].notnull()], fig_title=f"EFR", y_scale=[0.7, 1], x_key="timecode:O", y_key="EFR:Q", y_title="EFR")
# fig.save('figures/curves/EFRs.png', scale_factor=2.0)
# color_dom = ["CFT", "OnL2Reg", "OnEWC", "ER", "MaxLoss", "MIR"]
# color_range = ['gray', 'brown', '#7fc97f', '#D35400', 'purple', '#386cb0']
# fig = draw_curve(df=all_data[all_data["UKR"].notnull()], fig_title=f"UKR", y_scale=[0.66, 0.82], x_key="timecode:O", y_key="UKR:Q", y_title="UKR", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
# fig.save('figures/curves/UKRs.png', scale_factor=3.0)
# fig = draw_curve(df=all_data[all_data["OKR"].notnull()], fig_title=f"OKR", y_scale=[0.77, 0.96], x_key="timecode:O", y_key="OKR:Q", y_title="OKR", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
# fig.save('figures/curves/OKRs.png', scale_factor=3.0)
# fig = draw_curve(df=all_data[all_data["CSR"].notnull()], fig_title=f"CSR", y_scale=[0.52, 0.67], x_key="timecode:O", y_key="CSR:Q", y_title="CSR", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
# fig.save('figures/curves/CSRs.png', scale_factor=3.0)
# fig = draw_curve(df=all_data[all_data["KG"].notnull()], fig_title=f"KG", y_scale=[0.43, 0.54], x_key="timecode:O", y_key="KG:Q", y_title="KG", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
# fig.save('figures/curves/KGs.png', scale_factor=3.0)
# fig = draw_curve(df=all_data[all_data["OEC"].notnull()], fig_title=f"OEC", y_scale=[0.61, 0.72], x_key="timecode:O", y_key="OEC:Q", y_title="OEC", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
# fig.save('figures/curves/OECs.png', scale_factor=3.0)
|
CMR-main
|
experiments/bakcup/report_heatmap.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
maps = """MIR # 67.40 # QA_mir_lr=3e-5_ep=10_rs=32_rf=3_mcs=256_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
CFT # 61.58 # QA_simplecl_lr=3e-5_ep=10_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
ER # 66.62 # QA_er_lr=3e-5_ep=10_rs=32_rf=3_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
MaxLoss# 66.55 # QA_mir_lr=3e-5_ep=10_rs=32_rf=3_mcs=256_largest_afterloss_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
OnL2Reg # 65.09 # qa_simplecl_lr=3e-5_ep=10_l2w=1_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
OnEWC # 65.31 # qa_oewc_lr=3e-5_ep=10_lbd=250_gm=9e-1_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
"""
"""
*Frozen # 45.77 # QA_nonecl_T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8_result.json
"""
import os
import json
import pandas as pd
from cmr.notebooks.draw_utils import draw_stacked_bars, draw_curve
# os.chdir("experiments/results/qa/")
# Load each method's result file and pool the per-timestep online metrics
# into one long-format DataFrame (same parsing as report_heatmap.py).
all_data = []
for line in maps.splitlines():
    name, OECT, path = [i.strip() for i in line.split("#")]
    # print(name, OECT, path)
    o = json.load(open("experiments/results/qa/" + path))
    # debugger_args = eval(o["debugger_args"])
    # data_args = eval(o["data_args"])
    r = o["online_eval_results"]
    for item in r:
        item["prefix"] = name
        if item["timecode"] == 99:
            # Shift the final timecode 99 -> 100 so the axis ends at T=100.
            item["timecode"] += 1
    all_data += r
    # print(o)
    # EFRs = [item["EFR"] for item in online]
    # UKRs = [item["UKR"] for item in online if "UKR" in item]
    # OKRs = [item["OKR"] for item in online if "OKR" in item]
    # KGs = [item["KG"] for item in online if "KG" in item]
    # CSRs = [item["CSR"] for item in online if "CSR" in item]
# for item in all_data:
#     if item["name"] == "*Frozne":
#         item["OKR"] = 0
#     else:
#         if item["OKR"]
all_data = pd.DataFrame(all_data)
# Drop large per-example payload columns that are not needed for curves.
all_data = all_data.drop(columns=["before_eval_results", "before_error_ids", "mir_buffer_ids", "retrieved_ids", "OKR_sampled_ids"])
all_data = all_data.dropna()
# OEC = mean of the remaining metric columns, excluding bookkeeping ones.
all_data['OEC'] = all_data.drop(columns=["timecode", "EFR", "SR", "Overall"]).mean(numeric_only=True, axis=1)
# print(all_data)
# exit()
# print(all_data)
# fig = draw_curve(df=all_data[all_data["EFR"].notnull()], fig_title=f"EFR", y_scale=[0.7, 1], x_key="timecode:O", y_key="EFR:Q", y_title="EFR")
# fig.save('figures/curves/EFRs.png', scale_factor=2.0)
color_dom = ["CFT", "OnL2Reg", "OnEWC", "ER", "MaxLoss", "MIR"]
color_range = ['gray', 'brown', '#7fc97f', '#D35400', 'purple', '#386cb0']
fig = draw_curve(df=all_data[all_data["UKR"].notnull()], fig_title=f"UKR", y_scale=[0.66, 0.82], x_key="timecode:O", y_key="UKR:Q", y_title="UKR", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
fig.save('figures/curves/UKRs.png', scale_factor=3.0)
fig = draw_curve(df=all_data[all_data["OKR"].notnull()], fig_title=f"OKR", y_scale=[0.77, 0.96], x_key="timecode:O", y_key="OKR:Q", y_title="OKR", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
fig.save('figures/curves/OKRs.png', scale_factor=3.0)
fig = draw_curve(df=all_data[all_data["CSR"].notnull()], fig_title=f"CSR", y_scale=[0.52, 0.67], x_key="timecode:O", y_key="CSR:Q", y_title="CSR", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
fig.save('figures/curves/CSRs.png', scale_factor=3.0)
fig = draw_curve(df=all_data[all_data["KG"].notnull()], fig_title=f"KG", y_scale=[0.43, 0.54], x_key="timecode:O", y_key="KG:Q", y_title="KG", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
fig.save('figures/curves/KGs.png', scale_factor=3.0)
fig = draw_curve(df=all_data[all_data["OEC"].notnull()], fig_title=f"OEC", y_scale=[0.61, 0.72], x_key="timecode:O", y_key="OEC:Q", y_title="OEC", height=600, width=500, orient="none", color_dom=color_dom, color_range=color_range)
fig.save('figures/curves/OECs.png', scale_factor=3.0)
|
CMR-main
|
experiments/bakcup/report_curves.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
|
CMR-main
|
cmr/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This script was based on https://github.com/shmsw25/bart-closed-book-qa.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from cmr.models.utils import set_seeds
import sys
import argparse
import logging
import random
import numpy as np
import torch
from cmr.models.run_bart import run
def get_parser():
    """Build the CLI argument parser for BART training / evaluation.

    Returns:
        argparse.ArgumentParser with data, model, decoding, optimization,
        and misc flags. Defaults match the repo's QA setup.

    Fixes vs. original: corrected three help strings — the "Weight deay"
    typo, and the "Max gradient norm." text copy-pasted onto
    --gradient_accumulation_steps and --total_steps.
    """
    parser = argparse.ArgumentParser()

    # Basic parameters
    parser.add_argument("--train_file", default="data", required=False)
    parser.add_argument("--dev_file", default="data", required=False)
    parser.add_argument("--test_file", default="data", required=False)
    parser.add_argument("--dataset", default="None", required=False)
    parser.add_argument("--model", default="facebook/bart-base", required=False)
    parser.add_argument("--output_dir", default=None, type=str, required=False)
    parser.add_argument("--do_train", action='store_true')
    parser.add_argument("--do_predict", action='store_true')
    parser.add_argument("--predict_checkpoint", type=str,
                        default="best-model.pt")

    # Model parameters
    parser.add_argument("--checkpoint", type=str)
    parser.add_argument("--do_lowercase", action='store_true', default=False)
    parser.add_argument("--freeze_embeds", action='store_true', default=False)

    # Preprocessing/decoding-related parameters
    parser.add_argument('--max_input_length', type=int, default=128)
    parser.add_argument('--max_output_length', type=int, default=32)
    parser.add_argument('--num_beams', type=int, default=4)
    parser.add_argument("--append_another_bos",
                        action='store_true', default=False)

    # Training-related parameters
    parser.add_argument("--train_batch_size", default=64, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--predict_batch_size", default=32, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument("--learning_rate", default=3e-5, type=float,
                        help="The initial learning rate for Adam.")
    # parser.add_argument("--warmup_proportion", default=0.01, type=float,
    #                     help="Weight decay if we apply some.")  # Not used
    parser.add_argument("--weight_decay", default=0.01, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=0.1, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--gradient_accumulation_steps", default=1, type=int,
                        help="Number of update steps to accumulate before an optimizer step.")
    parser.add_argument("--num_train_epochs", default=1000.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_steps", default=300, type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument("--total_steps", default=-1, type=int,
                        help="Total optimization steps; -1 means derive from epochs.")
    parser.add_argument('--wait_step', type=int, default=10)

    # Other parameters
    parser.add_argument("--quiet", action='store_true',
                        help="If true, tqdm will not show progress bar")
    parser.add_argument('--eval_period', type=int, default=100,
                        help="Evaluate & save model")
    parser.add_argument('--prefix', type=str, default='',
                        help="Prefix for saving predictions")
    parser.add_argument('--debug', action='store_true',
                        help="Use a subset of data for debugging")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    return parser
def main():
    """CLI entry point: parse args, set up output dir / logging / seeds, run BART."""
    args = get_parser().parse_args()
    # if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
    #     print("Output directory () already exists and is not empty.")
    # NOTE(review): args.output_dir defaults to None and os.path.exists(None)
    # raises TypeError — confirm callers always pass --output_dir.
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    # Start writing logs to both a file in output_dir and stderr.
    log_filename = "{}log.txt".format("train_" if args.do_train else "eval_")
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO,
                        handlers=[logging.FileHandler(os.path.join(args.output_dir, log_filename)),
                                  logging.StreamHandler()])
    logger = logging.getLogger(__name__)
    logger.info(args)
    logger.info(args.output_dir)
    set_seeds(args.seed)
    args.n_gpu = torch.cuda.device_count()
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    # Validate the requested modes before doing any heavy work.
    if not args.do_train and not args.do_predict:
        raise ValueError(
            "At least one of `do_train` or `do_predict` must be True.")
    if args.do_train:
        if not args.train_file:
            raise ValueError(
                "If `do_train` is True, then `train_dir` must be specified.")
        if not args.dev_file:
            raise ValueError(
                "If `do_train` is True, then `predict_dir` must be specified.")
    if args.do_predict:
        if not args.test_file:
            raise ValueError(
                "If `do_predict` is True, then `predict_dir` must be specified.")
    logger.info("Using {} gpus".format(args.n_gpu))
    run(args, logger)
if __name__ == '__main__':
    main()
|
CMR-main
|
cmr/cli_bart.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from cmr.notebooks.draw_utils import draw_stacked_bars
from altair.vegalite.v4.schema.core import ColorName
from sklearn.utils import validation
import pandas as pd
import json
def visualize_stream(submission_stream, data_names, cfg):
    """Save stacked-bar charts of a submission stream and its error subset.

    For every time step, counts the examples contributed by each source
    dataset (the upstream dataset, data_names[0], is prefixed with "*")
    and writes two figures under figures/: the full submission stream and
    the initially-erroneous subset.
    """
    task_name = cfg["task_name"]
    episode_size = cfg["b"]
    submission_stat = []
    init_error_stat = []
    for time_step, episode_data in enumerate(list(submission_stream)):
        for dn in data_names:
            examples = [ex for ex in episode_data if ex["data_name"]==dn]
            num_init_errors = [ex for ex in examples if ex["init_status"]=="error"]
            if dn == data_names[0]:
                # Mark the upstream dataset so it sorts/renders distinctly.
                dn = "*" + dn
            submission_stat.append(dict(time_step=time_step, num_examples=len(examples), prefix=dn))
            init_error_stat.append(dict(time_step=time_step, num_examples=len(num_init_errors), prefix=dn))
    submission_stat_pd = pd.DataFrame(submission_stat)
    filename_str = f"T={cfg['T']},b={cfg['b']},alpha={cfg['alpha']},beta={cfg['beta']},gamma={cfg['gamma']}|[{cfg['stream_id']}]"
    title_str = f"alpha={cfg['alpha']}, beta={cfg['beta']}, gamma={cfg['gamma']}"
    fig1 = draw_stacked_bars(df=submission_stat_pd, fig_title=f"Submission Stream ({title_str})", y_scale=[0., episode_size+1], x_key="time_step", y_key="sum(num_examples)", y_title="# of Examples")
    fig1.save(f'figures/{task_name}.submission.{filename_str}.png', scale_factor=2.0)
    init_error_stat_pd = pd.DataFrame(init_error_stat)
    fig2 = draw_stacked_bars(df=init_error_stat_pd, fig_title=f"(Initial) Error Stream ({title_str})", y_scale=[0., episode_size+1], x_key="time_step", y_key="sum(num_examples)", y_title="# of Errors")
    fig2.save(f'figures/{task_name}.init_error.{filename_str}.png', scale_factor=2.0)
    # 50-step variants kept for reference:
    # color_dom = ["*squad", "hotpot", "news", "nq", "search", "trivia"]
    # color_range = ["gray", "blue", "orange", "green", "black", "brown"]
    # color_range = ['#bab0ac', '#f0027f', '#7fc97f', '#D35400', '#9c9ede', '#386cb0']
    # color_dom=None; color_range=None
    # fig1 = draw_stacked_bars(df=submission_stat_pd[submission_stat_pd["time_step"]<=50], x_scale=[0, 50], fig_title=f"Submission Stream ({title_str})", y_scale=[0., 65], x_key="time_step", y_key="sum(num_examples)", y_title="# of Examples", width=1000, bin_width=18, color_dom=color_dom, color_range=color_range)
    # fig1.save(f'figures/{task_name}.submission.{filename_str}.50.png', scale_factor=2.0)
    # init_error_stat_pd = pd.DataFrame(init_error_stat)
    # fig2 = draw_stacked_bars(df=init_error_stat_pd[init_error_stat_pd["time_step"]<=50], x_scale=[0, 50], fig_title=f"(Initial) Error Stream ({title_str})", y_scale=[0., 65], x_key="time_step", y_key="sum(num_examples)", y_title="# of Errors", width=1000, bin_width=18, color_dom=color_dom, color_range=color_range)
    # fig2.save(f'figures/{task_name}.init_error.{filename_str}.50.png', scale_factor=2.0)
    return
if __name__ == '__main__':
    qa_data_names = ["squad", "nq", "trivia", "hotpot", "news", "search"]
    cfg = dict(task_name="qa", upstream="squad") # T=100, b=64, alpha=0.9, beta=0.5, gamma=0.8
    with open("experiments/eval_data/qa/submission_stream.T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8-test.json") as f:
        streams = json.load(f)
        # Recover the stream config (T, b, alpha, ...) from the file name itself.
        str_start = f.name.index("submission_stream.") + len("submission_stream.")
        str_end = f.name.index("-")
        ns_config_str = f.name[str_start:str_end]
        # NOTE(review): eval() on text sliced from a local file name; fine for
        # this trusted script, unsafe if paths ever come from untrusted input.
        ns_config = eval(f"dict({ns_config_str})")
        cfg.update(ns_config)
    print(cfg)
    # Each stream in the file gets its own pair of figures.
    for stream_id, stream in enumerate(streams):
        cfg["stream_id"] = stream_id
        visualize_stream(stream, qa_data_names, cfg)
|
CMR-main
|
cmr/benchmark_gen/visualize_streams.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import enum
import json
import argparse
import random
from re import S
from cmr.models.utils import set_seeds
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch
import numpy as np
from tqdm import tqdm
import spacy, nltk
# CLI flags: input stream, output stream with paraphrases, and the shared
# dict of paraphrase candidates. `--mode` selects the generation phase
# ("paraphrasing") vs. the sampling phase; sharding splits the generation
# work across parallel workers/GPUs.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--data_stream_path",
    default="exp_results/data_streams/mrqa_naturalquestions_dev.data_stream.test.wr.json", type=str)
parser.add_argument(
    "--data_stream_path_with_paraphrases",
    default="exp_results/data_streams/mrqa_naturalquestions_dev.data_stream.test.wr.wpara.json", type=str)
parser.add_argument(
    "--data_paraphrased_dict",
    default="exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data.json", type=str)
parser.add_argument("--mode", default="paraphrasing", type=str)
parser.add_argument('--num_shards', type=int, default=4)
parser.add_argument('--shard_id', type=int, default=0)
def get_duplicate_ids(data_stream):
    """Find examples that occur more than once across the stream.

    Returns:
        dict mapping example id -> example dict, for every example whose
        id appears at least twice (the last occurrence is kept).
    """
    seen = set()
    duplicates = {}
    for episode in data_stream:
        for example in episode:
            ex_id = example["id"]
            if ex_id in seen:
                duplicates[ex_id] = example
            else:
                seen.add(ex_id)
    return duplicates
def split_sentences(text):
    """Split `text` into stripped sentence strings using spaCy.

    Fix vs. original: the spaCy pipeline is loaded once and cached on the
    function object — the original reloaded `en_core_web_sm` on every
    call, which is very slow when called per example.
    """
    nlp = getattr(split_sentences, "_nlp", None)
    if nlp is None:
        nlp = spacy.load('en_core_web_sm')  # python -m spacy download en_core_web_sm
        split_sentences._nlp = nlp
    docs = nlp(text)
    return [str(sent).strip() for sent in docs.sents]
def inference(tokenizer, model, inputs, K=5, max_input_length=100):
    """Generate paraphrases for a batch of sentences with beam search.

    Args:
        tokenizer, model: a seq2seq paraphrase model (T5) and its tokenizer.
        inputs: list of plain sentences; the paraphrase prompt is added here.
        K: requested paraphrases per sentence (effectively capped at 5,
           the number of beams).
        max_input_length: max token length for both encoding and generation.

    Returns:
        A list aligned with `inputs`; each element is a list of paraphrase
        strings with near-duplicates of the source removed, so inner lists
        may contain fewer than K entries (possibly zero).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # print("Device: ", device)
    inputs = add_prompt(inputs)
    tokenized_input = tokenizer.batch_encode_plus(inputs,
                                                  pad_to_max_length=True,
                                                  max_length=max_input_length, return_tensors="pt")
    batch_input_ids, batch_attention_masks = tokenized_input["input_ids"], tokenized_input["attention_mask"]
    batch_input_ids = batch_input_ids.to(device)
    batch_attention_masks = batch_attention_masks.to(device)
    # Earlier sampling-based decoding, kept for reference:
    # batch_outputs = model.generate(
    #     input_ids=batch_input_ids, attention_mask=batch_attention_masks,
    #     max_length=max_input_length,
    #     do_sample=True,
    #     top_k=100,
    #     top_p=0.8,
    #     early_stopping=True,
    #     num_return_sequences=K
    # )
    batch_outputs = model.generate(
        input_ids=batch_input_ids, attention_mask=batch_attention_masks,
        max_length=max_input_length,
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
        num_return_sequences=min(K, 5)  # cannot return more sequences than beams
    )
    results = []
    for output in batch_outputs:
        line = tokenizer.decode(output, skip_special_tokens=True,
                                clean_up_tokenization_spaces=True)
        results.append(line)
    # Outputs come back flattened across the batch; regroup per input sentence.
    splitted_results = np.array_split(results, len(inputs))
    # assert len(splitted_results[0]) == K
    for id in range(len(splitted_results)):
        splitted_results[id] = list(splitted_results[id])
    splitted_results = list(splitted_results)
    def is_too_similar(s1, s2):
        # Nearly identical strings (ignoring case/spaces) are useless as paraphrases.
        return nltk.edit_distance(s1.lower().replace(" ", ""), s2.lower().replace(" ", "")) <= 10
    for id in range(len(inputs)):
        s = inputs[id]
        splitted_results[id] = [p for p in splitted_results[id] if not is_too_similar(p, s)]
    return splitted_results
def add_prompt(sentences):
    """Wrap each sentence in the T5 paraphrasing prompt format."""
    prompted = []
    for sentence in sentences:
        prompted.append(f"paraphrase: {sentence} </s>")
    return prompted
def get_paraphrased_example(model, tokenizer, example):
    """Generate paraphrase candidates for one QA example.

    Splits the "Context: ... | Question: ..." input, paraphrases each
    context sentence (K=5) and the question (K=7, capped at 5 beams in
    `inference`).

    Returns:
        (context_sentences, context_paraphrases, question_paraphrases)
    """
    context, question = example["input"].split("|")
    context = context.replace("Context: ", "")
    question = question.replace("Question: ", "")
    context_sentences = split_sentences(context)
    context_paraphrases = inference(tokenizer, model, context_sentences, K=5)
    question_paraphrases = inference(tokenizer, model, [question], K=7)
    return context_sentences, context_paraphrases, question_paraphrases
    # print(len(sentences), len(paraphrases), len(paraphrases[0]))
    # print(sentences)
    # print(paraphrases)
def init_para_model():
    """Load the T5 PAWS paraphrase model and tokenizer, moving the model to GPU if available.

    Returns:
        (model, tokenizer)
    """
    tokenizer = T5Tokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws")
    model = T5ForConditionalGeneration.from_pretrained(
        "Vamsi/T5_Paraphrase_Paws")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Device: ", device)
    model = model.to(device)
    return model, tokenizer
def sample_from_paras(example):
    """Build a paraphrased "Context: ... | Question: ..." input for `example`.

    Randomly picks one paraphrase per context sentence and retries until
    the assembled context still contains at least one gold answer string.
    After 5 failed rounds, original sentences are mixed back in with
    increasing probability, which guarantees eventual termination.

    Fix vs. original: the sampled-sentence list is now reset on every
    retry; previously it kept growing across iterations, so retried
    contexts contained duplicated sentences.

    Args:
        example: dict with "context_paraphrases" (list of candidate lists),
            "context_sentences" (original sentences), "truth" (gold answer
            strings), and "question_paraphrases".

    Returns:
        The formatted input text string.
    """
    not_contain_answer = True
    loop_times = 0
    while not_contain_answer:
        context_paraphrases_sampled = []  # reset each retry (bug fix)
        for ind, candidates in enumerate(example["context_paraphrases"]):
            if loop_times >= 5 and random.randint(0, 10) <= loop_times and not_contain_answer:
                # Fall back to the original sentence with growing probability.
                context_paraphrases_sampled.append(example["context_sentences"][ind])
            else:
                context_paraphrases_sampled.append(random.choice(candidates))
        context = " ".join(context_paraphrases_sampled)
        if any(a in context for a in example["truth"]):
            not_contain_answer = False
        loop_times += 1
    question = random.choice(example["question_paraphrases"][0])
    assert len(question.strip()) >= 5
    input_text = f"Context: {context} | Question: {question}"
    return input_text
if __name__ == '__main__':
    # init the paraphrasing model.
    set_seeds(42)
    args = parser.parse_args()
    with open(args.data_stream_path, "r") as f :
        data_stream = json.load(f)
    # Examples that occur 2+ times in the stream need paraphrasing so that
    # repeated submissions are not byte-identical.
    examples_to_paraphrase = get_duplicate_ids(data_stream)
    print(len(examples_to_paraphrase))
    if args.mode == "paraphrasing":
        # Phase 1: generate paraphrase candidates for this worker's shard
        # and dump them to a shard-specific json file.
        model, tokenizer = init_para_model()
        paraphrased_examples = {}
        all_ids = sorted(list(examples_to_paraphrase.keys()))
        current_ids = np.array_split(all_ids, args.num_shards)[args.shard_id]
        for _id in tqdm(current_ids, desc=f"shard_id: {args.shard_id}"):
            example = examples_to_paraphrase[_id]
            context_sentences, context_paraphrases, question_paraphrases = get_paraphrased_example(model, tokenizer, example)
            example["context_sentences"] = context_sentences
            example["context_paraphrases"] = context_paraphrases
            example["question_paraphrases"] = question_paraphrases
            paraphrased_examples[_id] = example
        with open(args.data_paraphrased_dict, "w") as f :
            json.dump(paraphrased_examples, f)
    else:
        # Phase 2: rewrite repeated occurrences in the stream by sampling
        # from the pre-generated paraphrase candidates.
        with open(args.data_paraphrased_dict, "r") as f :
            data_paraphrased_dict = json.load(f)
        seen_ids = set()
        for episode in tqdm(data_stream, desc="Sampling from paraphrases"):
            for item in episode:
                if item["id"] not in examples_to_paraphrase:
                    # unique examples can pass
                    item["is_paraphrased"] = False
                    continue
                if item["id"] not in seen_ids:
                    # the first time seeing it.
                    seen_ids.add(item["id"])
                    item["is_paraphrased"] = False
                else:
                    # 2nd, 3rd time seeing it
                    paraphrased_input_text = sample_from_paras(data_paraphrased_dict[item["id"]])
                    item["input"] = paraphrased_input_text
                    item["is_paraphrased"] = True
        with open(args.data_stream_path_with_paraphrases, "w") as f:
            json.dump(data_stream, f)
"""
thread=6
gpu=0
CUDA_VISIBLE_DEVICES=${gpu} python cmr/benchmark_gen/para_stream.py \
--data_paraphrased_dict "exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data_${thread}.json" \
--num_shards $n_threads --shard_id ${thread} &
"""
"""
n_threads=8
n_gpus=8
start_gpuid=0
for (( thread=0; thread<${n_threads}; thread++ ))
do
gpu=$(($start_gpuid + $thread % n_gpus))
echo $thread, $gpu
CUDA_VISIBLE_DEVICES=${gpu} python cmr/benchmark_gen/para_stream.py \
--data_paraphrased_dict "exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data_${thread}_of_${n_threads}.json" \
--num_shards $n_threads --shard_id ${thread} &
done
# merge the files
n_threads=8
python cmr/benchmark_gen/merge_json_file.py \
--input_file_pattern exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data_#_of_${n_threads}.json \
--range "range(${n_threads})" \
--output_file exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data.json
# post sampling
python cmr/benchmark_gen/para_stream.py \
--mode sampling \
--data_paraphrased_dict "exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data.json"
"""
|
CMR-main
|
cmr/benchmark_gen/para_stream.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import argparse
from os import path
import random
import json
from cmr.models.utils import set_seeds
from cmr.task_manager.eval_metrics import evaluate_func
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def formatting_initial_status(data_name, predictions, truth_data, results_all, task="qa"):
    """Convert raw predictions + gold data into the stream-example format.

    Args:
        data_name: dataset name tag stored on each example.
        predictions: list of predicted answer strings.
        truth_data: list of (input_text, truth_answers, example_id) tuples.
        results_all: per-example metrics; must contain "EM" and "QA-F1"
            lists aligned with `predictions`.
        task: only "qa" enables the false-negative filter below.

    Returns:
        List of example dicts with input/truth/id/mistake/score/data_name
        and init_status ("error" if EM failed, else "pass").
    """
    assert len(predictions) == len(truth_data) == len(
        results_all["EM"]) == len(results_all["QA-F1"])
    formatted_data = []
    for pred, truth, em, f1 in zip(predictions, truth_data, results_all["EM"], results_all["QA-F1"]):
        if task == "qa" and not em and 0.5 < f1 < 1:
            # High F1 without exact match is likely a false negative; drop it.
            continue
        formatted_data.append({
            "input": truth[0],
            "truth": truth[1],
            "id": truth[2],
            "mistake": pred.strip(),
            "score": {"EM": int(bool(em)), "QA-F1": float(f1)},
            "data_name": data_name,
            "init_status": "error" if not em else "pass",
        })
    return formatted_data
def load_datasets(args):
    """Load gold data + upstream-model predictions for the QA or NLI task.

    Scores each dataset's predictions, formats examples with their initial
    pass/error status, and splits them into:
      - submission_data: per-dataset pools used to build the stream,
      - heldout_submission_data: held-out slices of each OOD dataset,
      - upstream_sampled_data: a fixed-size sample of the upstream dataset.

    NOTE(review): a task_name outside {"QA", "NLI"} leaves truth_paths /
    prediction_paths / upstream_data_name undefined -> NameError; confirm
    callers validate task_name first.
    """
    if args.task_name == "QA":
        truth_paths = {
            # "squad-train": "data/mrqa_squad/mrqa_squad_train.jsonl",
            "squad": "data/mrqa_squad/mrqa_squad_dev.jsonl",
            "nq": "data/mrqa_naturalquestions/mrqa_naturalquestions_dev.jsonl", #
            "trivia": "data/mrqa_triviaqa/mrqa_triviaqa_dev.jsonl",
            "hotpot": "data/mrqa_hotpotqa/mrqa_hotpotqa_dev.jsonl",
            "news": "data/mrqa_newsqa/mrqa_newsqa_dev.jsonl",
            "search": "data/mrqa_searchqa/mrqa_searchqa_dev.jsonl",
        }
        prediction_paths = {
            # "squad-train": "upstream_resources/qa_upstream_preds/mrqa_squad_train.predictions.json",
            "squad": "upstream_resources/qa_upstream_preds/mrqa_squad_dev.predictions.json",
            "nq": "upstream_resources/qa_upstream_preds/mrqa_naturalquestions_dev.predictions.json",
            "trivia": "upstream_resources/qa_upstream_preds/mrqa_triviaqa_dev.predictions.json",
            "hotpot": "upstream_resources/qa_upstream_preds/mrqa_hotpotqa_dev.predictions.json",
            "news": "upstream_resources/qa_upstream_preds/mrqa_newsqa_dev.predictions.json",
            "search": "upstream_resources/qa_upstream_preds/mrqa_searchqa_dev.predictions.json",
        }
        upstream_data_name = "squad"
    elif args.task_name == "NLI":
        truth_paths = {
            # "squad-train": "data/mrqa_squad/mrqa_squad_train.jsonl",
            "snli": "data/snli/snli_validation.jsonl",
            "multi_nli_matched": "data/multi_nli/multi_nli_validation_matched.jsonl", #
            "multi_nli_mismatched": "data/multi_nli/multi_nli_validation_mismatched.jsonl", #
            "scitail": "data/scitail/scitail_dev.jsonl",
            "anli": "data/anli/anli_dev.jsonl",
        }
        prediction_paths = {
            "snli": "upstream_resources/nli_upstream_preds/snli-snli_validation.predictions.json",
            "multi_nli_matched": "upstream_resources/nli_upstream_preds/multi_nli-multi_nli_validation_matched.predictions.json", #
            "multi_nli_mismatched": "upstream_resources/nli_upstream_preds/multi_nli-multi_nli_validation_mismatched.predictions.json", #
            "scitail": "upstream_resources/nli_upstream_preds/scitail-scitail_dev.predictions.json",
            "anli": "upstream_resources/nli_upstream_preds/anli-anli_dev.predictions.json",
        }
        upstream_data_name = "snli"
    all_truth_data = {}
    submission_data = {}
    heldout_submission_data = {}
    upstream_sampled_data = []
    # Read the gold (input, output, id) triples for every dataset.
    for data_name, data_file in truth_paths.items():
        truth_data = []
        with open(data_file) as fin:
            lines = fin.readlines()
        # train_examples = []
        for line in lines:
            d = json.loads(line)
            truth_data.append((d["input"], d["output"], d["id"]))
        all_truth_data[data_name] = truth_data
    # Score the upstream model's predictions and split into pools.
    for data_name, prediction_file in prediction_paths.items():
        with open(prediction_file, "r") as f:
            predictions = json.load(f)
        # get evaluation results.
        results, results_all = evaluate_func(
            predictions, all_truth_data[data_name], args.metric, return_all=True)
        print(f"{data_name} --- Evaluation results: {results}")
        formatted_data = formatting_initial_status(data_name, predictions, all_truth_data[data_name], results_all)
        random.shuffle(formatted_data)
        if data_name == upstream_data_name:
            # random.sample(formatted_data, k=args.upstream_eval_size)
            upstream_sampled_data = formatted_data[:args.upstream_eval_size]
            submission_data[upstream_data_name] = formatted_data[args.upstream_eval_size:]
            # print(f"len(upstream_sampled_data])={len(upstream_sampled_data)}")
        else:
            heldout_submission_data[data_name] = formatted_data[:args.heldout_submission_size] # held-out
            submission_data[data_name] = formatted_data[args.heldout_submission_size:]
            # print(f"len(heldout_submission_data['{data_name}'])={len(heldout_submission_data[data_name])}")
        print(f"len(submission_data['{data_name}'])={len(submission_data[data_name])}")
    # Report per-dataset initial error rates for sanity checking.
    for data_name, data in submission_data.items():
        num_examples = len(data)
        error_nums = [1 for item in data if item["init_status"] == "error"]
        print(f"{data_name} -- # examples = {num_examples}; Error rate: {sum(error_nums)/num_examples}")
    # QA_submission_data, QA_heldout_submission_data, QA_upstream_sampled_data
    return submission_data, heldout_submission_data, upstream_sampled_data
def generate_submission_stream(submission_data, args, cfg):
    """Sample one simulated submission stream of T episodes.

    Args:
        submission_data: dict mapping dataset name -> list of formatted examples.
        args: parsed CLI args; only ``args.task_name`` is read here.
        cfg: stream configuration dict with keys
            upstream -- name of the upstream dataset (must be a key of submission_data),
            T -- number of episodes, b -- examples per episode,
            alpha -- geometric decay of the upstream share (count = b * alpha**(t-1)),
            beta -- probability of KEEPING the current major OOD cluster each step,
            gamma -- share of the OOD budget given to the major OOD cluster.

    Returns:
        A list of T episodes, each a list of exactly b examples.
    """
    submission_stream = []
    upstream = cfg["upstream"]; T = cfg["T"]; b = cfg["b"]
    alpha = cfg["alpha"]; beta = cfg["beta"]; gamma = cfg["gamma"]
    assert upstream in submission_data
    OODs = [data_name for data_name in submission_data if data_name != upstream]
    N = len(OODs)  # number of OOD clusters, i.e. except for the upstream data
    if beta == 1:
        # beta == 1 means the major OOD cluster is fixed for the whole stream.
        # NOTE(review): for beta == 1 with a non-QA task, current_major_ood is
        # never initialized and the loop below would raise NameError — confirm
        # beta == 1 is only ever used with task_name == "QA".
        if args.task_name.lower() == "qa":
            current_major_ood = "nq"
    else:
        current_major_ood = random.choice(OODs)  # the initial major OOD cluster
    for t in range(1, T+1):
        S_t = []
        if alpha == 0:
            b_upstream = 0  # special case when upstream data ratio = 0; (because 0^0=1 by definition)
        else:
            b_upstream = round(b * (alpha**(t-1)))  # upstream share shrinks geometrically with t
        b_ood = b - b_upstream
        b_ood_major = round(b_ood * gamma)
        b_ood_diverse = b_ood - b_ood_major
        S_t += random.sample(submission_data[upstream], k=b_upstream)
        S_t += random.sample(submission_data[current_major_ood], k=b_ood_major)
        other_oods = [o for o in OODs if o != current_major_ood]
        # Spread the "diverse" OOD budget evenly across the non-major clusters.
        for o in other_oods:
            S_t += random.sample(submission_data[o], k=int(b_ood_diverse/len(other_oods)))
        if len(S_t) < b:
            # Integer rounding can leave the episode short; top it up from one
            # randomly chosen non-major cluster.
            o = random.choice(other_oods)
            S_t += random.sample(submission_data[o], k=b-len(S_t))
        assert len(S_t) == b
        # deal with the buffer
        # Switch major ood with probability (1 - beta).
        if random.random() < 1 - beta:
            current_major_ood = random.choice(other_oods)
        submission_stream.append(S_t)
    # visualize_stream(submission_stream, [upstream] + OODs, cfg, args)
    return submission_stream
def main():
    """CLI driver: build held-out / upstream eval files and sample validation and
    test submission streams for every configured (alpha, beta, gamma) setting."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--upstream_eval_size", type=int, default=512)
    parser.add_argument("--heldout_submission_size", type=int, default=256)
    parser.add_argument("--episode_size", type=int, default=64)
    parser.add_argument("--num_episodes", type=int, default=100)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--metric", default="EM|QA-F1")
    parser.add_argument("--num_val", type=int, default=3)
    parser.add_argument("--num_test", type=int, default=5)
    # "#args" in the path is replaced below by a per-config title string.
    parser.add_argument("--submission_stream_file", default="experiments/eval_data/qa/submission_stream.#args.json")
    parser.add_argument("--sampled_upstream_dataset", default="experiments/eval_data/qa/upstream_eval.jsonl")
    parser.add_argument("--heldout_submission_eval_file", default="experiments/eval_data/qa/heldout_eval.jsonl")
    parser.add_argument("--task_name", default="QA")
    args = parser.parse_args()
    print(args)
    set_seeds(args.seed)
    if args.task_name == "NLI":
        # Reuse the QA default paths, swapping the task directory.
        args.submission_stream_file = args.submission_stream_file.replace("qa", "nli")
        args.sampled_upstream_dataset = args.sampled_upstream_dataset.replace("qa", "nli")
        args.heldout_submission_eval_file = args.heldout_submission_eval_file.replace("qa", "nli")
    # QA:
    configs = {}
    # configs["QA"] = dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.98, beta=1, gamma=1)
    configs["QA"] = []
    configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.9, gamma=0.8))
    configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.5, gamma=0.8))
    configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.1, gamma=0.8))
    configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.5, gamma=0.5))
    configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.5, gamma=0.2))
    configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.1, beta=0.5, gamma=0.8))
    configs["QA"].append(dict(upstream="squad", T=args.num_episodes, b=args.episode_size, alpha=0.95, beta=0.5, gamma=0.8))
    configs["NLI"] = []
    configs["NLI"].append(dict(upstream="snli", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.9, gamma=0.8))
    configs["NLI"].append(dict(upstream="snli", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.5, gamma=0.8))
    configs["NLI"].append(dict(upstream="snli", T=args.num_episodes, b=args.episode_size, alpha=0.9, beta=0.1, gamma=0.8))
    # if args.task_name == "QA":
    submission_data, heldout_submission_data, upstream_sampled_data = load_datasets(args)
    # Flatten the per-dataset held-out examples into one jsonl eval file.
    with open(args.heldout_submission_eval_file, "w") as f:
        flat_heldout_submission_data = []
        for v in list(heldout_submission_data.values()):
            flat_heldout_submission_data += v
        for item in flat_heldout_submission_data:
            f.write(json.dumps(item) + "\n")
    # Save the sampled upstream evaluation examples.
    with open(args.sampled_upstream_dataset, "w") as f:
        for item in upstream_sampled_data:
            f.write(json.dumps(item) + "\n")
    cfgs = configs[args.task_name]
    for cfg in cfgs:
        # Generate Validation/Test Streams for this configuration.
        validation_streams = []
        test_streams = []
        for _ in range(args.num_val):
            submission_stream = generate_submission_stream(submission_data, args, cfg)
            validation_streams.append(submission_stream)
        for _ in range(args.num_test):
            submission_stream = generate_submission_stream(submission_data, args, cfg)
            test_streams.append(submission_stream)
        # Encode the config into the output filenames (replaces "#args").
        prefix_title_str = f"T={cfg['T']},b={cfg['b']},alpha={cfg['alpha']},beta={cfg['beta']},gamma={cfg['gamma']}"
        title_str = prefix_title_str + "-val"
        with open(args.submission_stream_file.replace("#args", title_str), "w") as f:
            print(f"To save {f.name}")
            json.dump(validation_streams, f)
        title_str = prefix_title_str + "-test"
        with open(args.submission_stream_file.replace("#args", title_str), "w") as f:
            print(f"To save {f.name}")
            json.dump(test_streams, f)
# Script entry point; example invocations are recorded in the string below.
if __name__ == '__main__':
    main()
"""
python cmr/benchmark_gen/sample_submission_streams.py --task_name QA
python cmr/benchmark_gen/sample_submission_streams.py --task_name NLI --episode_size 256
"""
|
CMR-main
|
cmr/benchmark_gen/sample_submission_streams.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
|
CMR-main
|
cmr/benchmark_gen/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import json
import argparse
from types import new_class
import random
# Build an offline-retraining dataset: mix every initially-wrong example from the
# submission stream with (a sample of) the upstream training data, and emit a
# matching dev split from the held-out evaluation file.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--upstream_file",
    default="data/mrqa_squad/mrqa_squad_train.jsonl", type=str)
parser.add_argument(
    "--submission_file",
    default="experiments/eval_data/qa/submission_stream.T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8.json", type=str)
parser.add_argument(
    "--mixed_offline_file",
    default="experiments/eval_data/qa/offline_retrain.T=100,b=64,alpha=0.9,beta=0.5,gamma=0.8.jsonl", type=str)
parser.add_argument(
    "--heldout_eval_file",
    default="experiments/eval_data/qa/heldout_eval.jsonl", type=str)
parser.add_argument("--ratio", default=-1, type=float)
args = parser.parse_args()

# Collect the initial errors across all episodes, re-serialized in the reduced
# (id, input, output) format used by the training pipeline.
with open(args.submission_file, "r") as f:
    data_stream = json.load(f)
all_init_errors = [
    json.dumps(dict(id=item["id"], input=item["input"], output=item["truth"]))
    for data_batch in data_stream
    for item in data_batch
    if item["init_status"] == "error"
]

# Re-serialize the held-out evaluation examples in the same reduced format.
with open(args.heldout_eval_file) as f:
    eval_lines = f.read().splitlines()
eval_examples = [
    json.dumps(dict(id=item["id"], input=item["input"], output=item["truth"]))
    for item in map(json.loads, eval_lines)
]

with open(args.upstream_file) as f:
    upstream_lines = f.read().splitlines()
if args.ratio == 1:
    # ratio == 1: keep the entire upstream training set.
    pass
else:
    upstream_lines = random.sample(upstream_lines, len(all_init_errors))  # same number of examples
mixed_lines = upstream_lines + all_init_errors

with open(args.mixed_offline_file, "w") as f:
    f.writelines(line + "\n" for line in mixed_lines)
with open(args.mixed_offline_file.replace(".jsonl", ".dev.jsonl"), "w") as f:
    f.writelines(line + "\n" for line in eval_examples)

print(f"len(upstream_lines)={len(upstream_lines)}")
print(f"len(all_init_errors)={len(all_init_errors)}")
print(f"len(mixed_lines)={len(mixed_lines)}")
print(f"len(eval_examples)={len(eval_examples)}")
|
CMR-main
|
cmr/benchmark_gen/generate_offline_retrainfile.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from __future__ import absolute_import, division, print_function
import argparse
import json
import logging
import os
import random
from cmr.models.utils import set_seeds
import sys
import numpy as np
import torch
from cmr.benchmark_gen import bart_api
from cmr.task_manager.eval_metrics import (evaluate_func,
normalize_answer)
def main():
    """CLI driver for running BART inference over a dataset file.

    Two modes:
      * default: call bart_api.inference_api and dump predictions to
        --prediction_file (optionally sharded via --data_dist/--num_shards/--local_id);
      * --post_process: merge per-shard prediction files, evaluate them against
        the truth data, and save the merged predictions.
    """
    parser = argparse.ArgumentParser()
    # Mode
    parser.add_argument("--post_process", action='store_true')
    # Data distributed
    parser.add_argument("--data_dist", action='store_true')
    parser.add_argument('--local_id', type=int, default=-1, help="")
    parser.add_argument('--num_shards', type=int, default=-1, help="")
    # Basic parameters
    parser.add_argument(
        "--data_file", default="data/mrqa_naturalquestions/mrqa_naturalquestions_dev.100.jsonl", required=False)
    parser.add_argument(
        "--prediction_file", default="bug_data/mrqa_naturalquestions_dev.bugs.jsonl", required=False)
    # parser.add_argument(
    #     "--bug_file", default="bug_data/mrqa_naturalquestions_dev.bugs.jsonl", required=False)
    # NOTE: flag name "--conig_file" is a typo for "config_file", kept because
    # existing launch scripts pass it by this name.
    parser.add_argument(
        "--conig_file", default="scripts/infer_mrqa_bart_base.config", required=False)
    parser.add_argument("--prefix", default="", required=False)
    # API for Evaluation
    parser.add_argument("--metric", default="EM|QA-F1", required=False)
    # Sampling
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    args = parser.parse_args()
    set_seeds(args.seed)
    # Log both to a per-prefix file and to stdout.
    log_filename = "logs/build_bugpool_log_{}.txt".format(args.prefix)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO,
                        handlers=[logging.FileHandler(os.path.join(log_filename)),
                                  logging.StreamHandler()])
    logger = logging.getLogger(__name__)
    # get the truth data as (input, output_list, id) tuples
    truth_data = []
    with open(args.data_file) as fin:
        lines = fin.readlines()
    # train_examples = []
    for line in lines:
        # d = line.strip().split("\t")
        # truth_data.append((d[0], d[1:]))
        d = json.loads(line)
        truth_data.append((d["input"], d["output"], d["id"]))
    # get the predictions of a model via its API and config file.
    if not args.post_process:
        predictions = bart_api.inference_api(
            config_file=args.conig_file,
            test_file=args.data_file,
            logger=logger, data_dist=args.data_dist, num_shards=args.num_shards, local_id=args.local_id)
        with open(args.prediction_file, "w") as f:
            json.dump(predictions, f)
    else:
        # Merge sharded prediction files; "shard_id" in the path is a placeholder.
        # base_path = "bug_data/mrqa_naturalquestions_train.predictions.shard_id.jsonl"
        # num_shards = 7
        predictions = []
        for shard_id in range(0, args.num_shards):
            current_file = args.prediction_file.replace("shard_id", str(shard_id))
            print("loading", current_file)
            with open(current_file, "r") as f:
                for line in f.read().splitlines():
                    predictions += json.loads(line)
        print(len(predictions), len(truth_data))
        # get evaluation results.
        results, results_all = evaluate_func(
            predictions, truth_data, args.metric, return_all=True)
        logging.info(f"Evaluation results: {results}")
        # Save the merged predictions under the un-sharded filename.
        with open(args.prediction_file.replace(".shard_id.", "."), "w") as f:
            json.dump(predictions, f)
    # bug_lines, pass_lines = generate_bugs(predictions, truth_data, results_all)
    # logging.info("{} example are passed. Found {} bugs ".format(
    #     len(pass_lines), len(bug_lines)))
    # # save the bugs
    # with open(args.bug_file, "w") as f:
    #     f.write("\n".join(bug_lines))
    # # save the passes
    # with open(args.bug_file.replace("bugs", "pass"), "w") as f:
    #     f.write("\n".join(pass_lines))
# Script entry point; an (incomplete) example invocation is kept below.
if __name__ == '__main__':
    main()
"""
python cmr/benchmark_gen/run_bart_infer.py \
--
"""
|
CMR-main
|
cmr/benchmark_gen/run_bart_infer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import argparse
import json
import random
from cmr.task_manager.eval_metrics import evaluate_func
import numpy as np
def generate_bugs(predictions, truth_data, results_all, f1_upper_bound=0.5):
    """Split examples into initial errors ("bugs") and initial passes.

    Args:
        predictions: list of predicted answer strings.
        truth_data: aligned list of (input, gold_answers, id) tuples.
        results_all: per-example scores, a dict with boolean list "EM" and
            float list "QA-F1", both aligned with `predictions`.
        f1_upper_bound: an example only counts as a bug when its QA-F1 is at
            or below this threshold (avoids labeling near-misses as errors).

    Returns:
        (bug_lines, pass_lines): two lists of example dicts with keys
        input/truth/id/mistake/score/init_status. Examples with EM == False
        but QA-F1 above the bound belong to neither list.
    """
    assert len(predictions) == len(truth_data) == len(
        results_all["EM"]) == len(results_all["QA-F1"])
    bug_lines = []
    pass_lines = []
    for p, t, em, f1 in zip(predictions, truth_data, results_all["EM"], results_all["QA-F1"]):
        item = {
            "input": t[0],
            "truth": t[1],
            "id": t[2],
            "mistake": p.strip(),
            # idiomatic truthiness instead of the original `em == True` comparisons
            "score": {"EM": int(bool(em)), "QA-F1": float(f1)},
        }
        if not em and f1 <= f1_upper_bound:  # decide later about the threshold of f1 score
            item["init_status"] = "error"  # set status before appending, for clarity
            bug_lines.append(item)
        if em:
            item["init_status"] = "pass"
            pass_lines.append(item)
    return bug_lines, pass_lines
def get_data_stream(data_pool, batch_size, num_batches, use_score=False):
    """Chop *data_pool* into at most *num_batches* batches of *batch_size*.

    When use_score is True the pool is ordered from easiest to hardest
    (descending QA-F1); otherwise it is shuffled in place first.
    """
    assert batch_size * num_batches <= len(data_pool)
    if use_score:
        # from easier to harder
        ordered = sorted(data_pool, key=lambda ex: ex["score"]["QA-F1"], reverse=True)
    else:
        # no sorting, randomly shuffled (in place)
        random.shuffle(data_pool)
        ordered = data_pool
    stream = []
    for begin in range(0, len(data_pool), batch_size):
        stream.append(ordered[begin:begin + batch_size])
        if len(stream) == num_batches:
            break
    return stream
def get_data_stream_with_replacement(data_pool, batch_size, num_batches):
    """Draw *num_batches* batches of *batch_size* examples, sampling with
    replacement ACROSS batches (each batch itself has no duplicates).

    Prints repetition statistics and returns the list of batches.
    """
    assert batch_size * num_batches <= len(data_pool)
    random.shuffle(data_pool)  # no sorting, randomly shuffled in place
    data_stream = []
    seen_ids = set()
    duplicate_ids = set()
    num_repetition = 0
    num_revisited_times = 0
    for _ in range(num_batches):
        data_batch = random.sample(data_pool, batch_size)
        data_stream.append(data_batch)
        revisited = [ex["id"] for ex in data_batch if ex["id"] in seen_ids]
        num_repetition += len(revisited)
        num_revisited_times += len(revisited)
        duplicate_ids.update(revisited)
        seen_ids.update(ex["id"] for ex in data_batch)
    print(f"num_repetition: {num_repetition}; num_total_examples: {len(seen_ids)}; length: {batch_size * num_batches}; ratio: {num_repetition/(batch_size * num_batches)}; num_duplicate_ids: {len(duplicate_ids)}; num_revisited_times: {num_revisited_times}")
    return data_stream
def get_replay_stream(data_stream, replay_eval_size, window_size=10):
    """For every episode after the first, sample *replay_eval_size* examples
    from the previous *window_size* episodes, for replay-based evaluation.

    Returns a list one shorter than *data_stream* (no replay for episode 0).
    """
    past_error_pool = {}  # error in terms of the initial model, keyed by timecode
    replay_stream = []
    for timecode, data_batch in enumerate(data_stream):
        # add the errors to the pool
        past_error_pool[timecode] = []
        for item in data_batch:
            # NOTE(review): the `True or` disables the init_status filter, so
            # EVERY item is pooled, not just the errors — looks like a
            # deliberate debugging override left in; confirm before changing.
            if True or item["init_status"] == "error":
                past_error_pool[timecode].append(item)
        # build the candidate pool from the sliding history window
        start_ind = max(0, timecode-window_size)
        end_ind = min(timecode, len(past_error_pool))
        candidate_replay_instances = []
        if end_ind == 0:
            continue # do not add for the first episode because there is no history for it
        for ind in range(start_ind, end_ind): # not including itself
            candidate_replay_instances += past_error_pool[ind]
        # Pad with "pass" items from the last 5 episodes until the pool is
        # large enough to sample from.
        for _db in data_stream[-5:]:
            if len(candidate_replay_instances) >= replay_eval_size:
                break
            for item in _db:
                if len(candidate_replay_instances) >= replay_eval_size:
                    break
                if item["init_status"] == "pass":
                    candidate_replay_instances.append(item)
        # print(start_ind, end_ind, len(candidate_replay_instances))
        assert len(candidate_replay_instances) >= replay_eval_size
        sampled_replay = random.sample(candidate_replay_instances, replay_eval_size)
        replay_stream.append(sampled_replay)
    assert len(replay_stream) == len(data_stream) - 1
    return replay_stream
def main():
    """CLI driver: evaluate model predictions against one or more datasets,
    build bug/pass pools, and sample a data stream (plus optional replay
    stream and hidden eval set) for continual-learning experiments.

    Multiple datasets are passed as "#"-separated lists in --data_file and
    --prediction_file (aligned pairwise).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_file", default="data/mrqa_naturalquestions/mrqa_naturalquestions_train.jsonl", required=False)
    parser.add_argument(
        "--prediction_file", default="bug_data/mrqa_naturalquestions_train.predictions.jsonl", required=False) # Input
    parser.add_argument(
        "--data_stream_file", default="bug_data/mrqa_naturalquestions_dev.data_stream.train.json", required=False) # Output
    parser.add_argument(
        "--replay_stream_file", default="bug_data/mrqa_naturalquestions_dev.replay_stream.train.json", required=False) # Output
    parser.add_argument(
        "--hidden_example_file", default="bug_data/mrqa_naturalquestions.hidden.jsonl", required=False) # Output
    parser.add_argument("--batch_size", type=int, default=32, required=False)
    parser.add_argument("--replay_eval_size", type=int, default=-1, required=False)
    parser.add_argument("--bug_sample_size", type=int, default=1000, required=False)
    parser.add_argument("--max_bug_each_data", type=int, default=-1, required=False)
    parser.add_argument("--pass_sample_size", type=int, default=2200, required=False)
    parser.add_argument("--hidden_sample_size", type=int, default=-1, required=False)
    parser.add_argument("--num_batches", type=int, default=100, required=False)
    parser.add_argument("--seed", type=int, default=42, required=False)
    parser.add_argument("--metric", default="EM|QA-F1", required=False)
    parser.add_argument("--sample_method", default="no_replace", required=False)
    # batch_size * num_batches <= # lines of bug_pool_file
    args = parser.parse_args()
    print(args)
    random.seed(args.seed)
    # Load the truth data of each dataset as (input, outputs, id) tuples.
    all_truth_data = []
    for data_file in args.data_file.split("#"):
        truth_data = []
        with open(data_file) as fin:
            lines = fin.readlines()
        # train_examples = []
        for line in lines:
            # d = line.strip().split("\t")
            # truth_data.append((d[0], d[1:]))
            d = json.loads(line)
            truth_data.append((d["input"], d["output"], d["id"]))
        all_truth_data.append(truth_data)
    # Load and evaluate the predictions for each dataset, accumulating the
    # per-example results across datasets.
    all_pred_data = []
    merged_restuls_all = None
    for prediction_file in args.prediction_file.split("#"):
        with open(prediction_file, "r") as f:
            predictions = json.load(f)
        # get evaluation results.
        print(f"len(predictions): {len(predictions)}")
        print(f"len(all_truth_data[len(all_pred_data)]): {len(all_truth_data[len(all_pred_data)])}")
        results, results_all = evaluate_func(
            predictions, all_truth_data[len(all_pred_data)], args.metric, return_all=True)
        print(f"{prediction_file}; Evaluation results: {results}")
        all_pred_data.append(predictions)
        if merged_restuls_all is None:
            merged_restuls_all = results_all
        else:
            for key in merged_restuls_all:
                merged_restuls_all[key].extend(results_all[key])
    merged_truth_data = []
    for item in all_truth_data:
        merged_truth_data.extend(item)
    merged_predictions = []
    for item in all_pred_data:
        merged_predictions.extend(item)
    bug_pool, pass_pool = generate_bugs(merged_predictions, merged_truth_data, merged_restuls_all)
    # make each dataset has the same number of examples (cap bugs per dataset)
    if len(all_truth_data) >= 2:
        filtered_bug_pool = []
        counts = {}
        random.shuffle(bug_pool)
        for item in bug_pool:
            # ids are prefixed "datasetname-...", so this recovers the source dataset
            dataset_name = item["id"].split("-")[0]
            if dataset_name not in counts:
                counts[dataset_name] = 0
            if counts[dataset_name] >= args.max_bug_each_data and args.max_bug_each_data > 0:
                continue
            filtered_bug_pool.append(item)
            counts[dataset_name] += 1
        bug_pool = filtered_bug_pool
    else:
        bug_pool = bug_pool
    # exit()
    print(f"len(bug_pool)={len(bug_pool)}; len(pass_pool)={len(pass_pool)} <--- len(predictions)={len(predictions)}")
    # bug_pool = []
    # with open(args.bug_pool_file) as f:
    #     for line in f.read().splitlines():
    #         bug_pool.append(json.loads(line))
    # with open(args.pass_pool_file) as f:
    #     pass_pool = [json.loads(line) for line in f.read().splitlines()]
    #     pass_pool = [item for item in pass_pool if item["score"]
    #                  ["EM"] == 1] # only the EM=1 examples
    random.shuffle(pass_pool)
    random.shuffle(bug_pool)
    if args.bug_sample_size >= 0 and args.pass_sample_size >= 0:
        sampled_bug_pool = bug_pool[:args.bug_sample_size]
        sampled_pass_pool = pass_pool[:args.pass_sample_size]
        # Optionally carve out a hidden eval set from the remaining passes.
        if args.hidden_sample_size > 0 and args.hidden_sample_size + args.pass_sample_size <= len(pass_pool):
            if len(all_truth_data) >= 2:
                # make equal test examples per dataset.
                hidden_examples = []
                counts = {}
                random.shuffle(pass_pool)
                for item in pass_pool:
                    dataset_name = item["id"].split("-")[0]
                    if dataset_name not in counts:
                        counts[dataset_name] = 0
                    if counts[dataset_name] >= (args.hidden_sample_size/len(all_truth_data)):
                        continue
                    hidden_examples.append(item)
                    counts[dataset_name] += 1
            else:
                hidden_examples = pass_pool[-args.hidden_sample_size:]
            with open(args.hidden_example_file, "w") as f:
                for item in hidden_examples:
                    f.write(json.dumps(item) + "\n")
        print(len(sampled_bug_pool), len(sampled_pass_pool))
        sampled_data_pool = sampled_bug_pool + sampled_pass_pool
    else:
        sampled_data_pool = pass_pool + bug_pool
        sampled_data_pool = sampled_data_pool[:args.batch_size * args.num_batches]
    if args.sample_method == "no_replace":
        data_stream = get_data_stream(
            sampled_data_pool, args.batch_size, args.num_batches, use_score=False) # randomly sorted bugs
    elif args.sample_method == "with_replace":
        data_stream = get_data_stream_with_replacement(
            sampled_data_pool, args.batch_size, args.num_batches) # randomly sorted bugs
    if args.replay_eval_size > 0:
        replay_stream = get_replay_stream(data_stream, args.replay_eval_size)
        # replay_stream.insert(0, random.sample(sampled_bug_pool, args.replay_eval_size))
        # Episode 0 gets a replay batch sampled from the whole pool.
        replay_stream.insert(0, random.sample(sampled_data_pool, args.replay_eval_size))
        with open(args.replay_stream_file, "w") as f:
            json.dump(replay_stream, f)
    with open(args.data_stream_file, "w") as f:
        json.dump(data_stream, f)
# Script entry point; the string below records example invocations.
if __name__ == '__main__':
    main()
"""
python semanticdebugger/benchmark_gen/sample_stream_data.py \
--sample_method no_replace \
--data_file \
data/mrqa_naturalquestions/mrqa_naturalquestions_dev.jsonl#\
data/mrqa_squad/mrqa_squad_dev.jsonl#\
data/mrqa_triviaqa/mrqa_triviaqa_dev.jsonl#\
data/mrqa_hotpotqa/mrqa_hotpotqa_dev.jsonl \
--prediction_file \
bug_data/mrqa_naturalquestions_dev.predictions.jsonl#\
bug_data/mrqa_squad_dev.predictions.jsonl#\
bug_data/mrqa_triviaqa_dev.predictions.jsonl#\
bug_data/mrqa_hotpotqa_dev.predictions.jsonl \
--data_stream_file exp_results/data_streams/mrqa.mixed.data_stream.test.json \
--hidden_sample_size 1000 \
--hidden_example_file exp_results/data_streams/mrqa.mixed.hidden_passes.jsonl \
--batch_size 32 --num_batches 100 \
--seed 42 \
--max_bug_each_data 800 \
--bug_sample_size 3200 --pass_sample_size 0
# python semanticdebugger/benchmark_gen/sample_stream_data.py \
# --sample_method no_replace \
# --data_file data/mrqa_naturalquestions/mrqa_naturalquestions_train.jsonl \
# --prediction_file bug_data/mrqa_naturalquestions_train.predictions.jsonl \
# --data_stream_file exp_results/data_streams/mrqa_naturalquestions_dev.data_stream.train.wr.json \
# --hidden_example_file exp_results/data_streams/mrqa_naturalquestions_dev.hidden_passes.jsonl \
# --batch_size 32 --num_batches 500 \
# --bug_sample_size 4688 --pass_sample_size 0 \
# --hidden_sample_size -1
"""
|
CMR-main
|
cmr/benchmark_gen/sample_stream_data.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import json
import argparse
# Merge sharded output files (produced by parallel workers) into one file.
# --mode json : each shard is one JSON value (dicts are merged, lists concatenated)
# --mode jsonl: each shard is one JSON object per line
parser = argparse.ArgumentParser()
parser.add_argument(
    "--input_file_pattern",
    default="exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data_#.json", type=str)
parser.add_argument(
    "--output_file",
    default="exp_results/data_streams/paraphrase/mrqa_naturalquestions_dev.data_stream.test.wr.para_data.json", type=str)
parser.add_argument(
    "--range",
    default="range(16)", type=str)
parser.add_argument(
    "--mode",
    default="json", type=str)
args = parser.parse_args()

all_data = None
# SECURITY: `eval` executes arbitrary code from the command line. Acceptable
# only for this trusted internal research script; never expose --range to
# untrusted input (a safer alternative would be parsing "start:stop" ints).
for shard_id in eval(args.range):
    # "#" in the pattern is the shard-id placeholder.
    filename = args.input_file_pattern.replace("#", str(shard_id))
    if args.mode == "json":
        with open(filename) as f:
            print(f.name)
            data = json.load(f)
    elif args.mode == "jsonl":
        with open(filename) as f:
            print(f.name)
            data = [json.loads(line) for line in f.read().splitlines() if line]
    if all_data is None:
        all_data = data
    else:
        # isinstance (not `type(...) ==`) is the idiomatic type check here.
        if isinstance(all_data, dict):
            all_data.update(data)
        else:
            all_data += data

with open(args.output_file, "w") as f:
    if args.mode == "json":
        json.dump(all_data, f)
    elif args.mode == "jsonl":
        for item in all_data:
            f.write(json.dumps(item) + "\n")
|
CMR-main
|
cmr/benchmark_gen/merge_json_file.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import json
import os
import sys
from argparse import Namespace
import torch
from cmr.models.mybart import MyBart
from cmr.models.run_bart import inference
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from cmr.task_manager.dataloader import GeneralDataset
from tqdm import tqdm
from transformers import BartConfig, BartTokenizer
def inference_api(config_file, test_file, logger, data_dist, num_shards, local_id):
    """Run a trained BART checkpoint over *test_file* and return its predictions.

    config_file: path to a file whose text evaluates to an argparse.Namespace
        holding model/inference settings (model, predict_checkpoint, dataset, ...).
    data_dist / num_shards / local_id: when sharded inference is enabled, this
        process only handles its local shard of the data.
    Returns the list of prediction strings.
    """
    with open(config_file) as f:
        # SECURITY: eval() executes the config file as Python code — only use
        # with trusted, repo-local config files.
        config_args = eval(f.read())  # a Namespace object
    args = config_args
    logger.info(f"Config args: {config_args}")
    # load config from json
    test_data = GeneralDataset(
        logger, args, test_file, data_type="dev", is_training=False, task_name=args.dataset, data_dist=data_dist, num_shards=num_shards, local_id=local_id)
    tokenizer = BartTokenizer.from_pretrained("bart-large")
    # skip_cache when sharded, so shards don't clobber each other's cache files
    test_data.load_dataset(tokenizer, skip_cache=data_dist)
    test_data.load_dataloader()
    checkpoint = os.path.join(args.predict_checkpoint)
    logger.info("Loading checkpoint from {} ....".format(checkpoint))
    model = MyBart.from_pretrained(args.model,
                                   state_dict=convert_model_to_single_gpu(torch.load(checkpoint)))
    logger.info("Loading checkpoint from {} .... Done!".format(checkpoint))
    if torch.cuda.is_available():
        model.to(torch.device("cuda"))
    model.eval()
    predictions = inference(
        model, test_data, save_predictions=False, verbose=True, args=args, logger=logger, return_all=False, predictions_only=True)
    return predictions
    # logger.info("%s on %s data: %.s" % (test_data.metric, test_data.data_type, str(result)))
|
CMR-main
|
cmr/benchmark_gen/bart_api.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import numpy as np
import string
import re
from collections import Counter
from sklearn.metrics import matthews_corrcoef, f1_score
from scipy.stats import pearsonr, spearmanr
# from rouge import Rouge
# Maps a task_name (as used by GeneralDataset) to the "|"-separated metric
# spec consumed by evaluate_func below.
METRICS = {
    'mrqa_naturalquestions': 'EM|QA-F1',
    'mrqa': 'EM|QA-F1',
    'nli': 'EM',
    'csr': 'EM|QA-F1',
}
def accuracy(prediction, ground_truth):
    """Return True iff *prediction* equals *ground_truth*, ignoring case."""
    pred_norm = prediction.lower()
    truth_norm = ground_truth.lower()
    return pred_norm == truth_norm
def evaluate_func(predictions, data, metric, return_all=False):
    """Score *predictions* against gold *data* for each metric in *metric*.

    Args:
        predictions: list of predicted strings, aligned with *data*.
        data: list of (input, gold_answers, id) tuples; gold_answers is a list.
        metric: "|"-separated spec, e.g. "EM|QA-F1".
        return_all: when True, also return per-example scores (populated for
            the EM / ACC / QA-F1 metrics only).

    Returns:
        dict of aggregate scores, plus the per-example dict when return_all.
    """
    def cast_to_float(values):
        # Non-numeric predictions are mapped to NaN so correlations still run.
        converted = []
        for value in values:
            try:
                converted.append(float(value.strip()))
            except:
                converted.append(float('NaN'))
        assert len(converted) == len(values)
        return converted
    assert len(predictions) == len(data)
    requested_metrics = [part.strip() for part in metric.split("|")]
    results = {}
    results_all = {}
    for m in requested_metrics:
        if m == "EM":
            em_scores = [get_exact_match_over_list(pred, gold[1])
                         for pred, gold in zip(predictions, data)]
            results[m] = np.mean(em_scores)
            results_all[m] = [bool(score) for score in em_scores]
        elif m == "ACC":
            acc_scores = [get_accruacy_over_list(pred, gold[1])
                          for pred, gold in zip(predictions, data)]
            results[m] = np.mean(acc_scores)
            results_all[m] = acc_scores
        elif m == "QA-F1":
            f1_scores = [get_f1_over_list(pred, gold[1])
                         for pred, gold in zip(predictions, data)]
            results[m] = np.mean(f1_scores)
            results_all[m] = [float(score) for score in f1_scores]
        elif m == "Classification-F1":
            results[m] = f1_score([gold[1][0] for gold in data],
                                  predictions, average="macro")
        elif m == "Matthew-Correlation":
            results[m] = get_matthews_corr(data, predictions)
        elif m == "Pearson-Correlation":
            predictions = cast_to_float(predictions)
            results[m] = pearsonr([float(gold[1][0]) for gold in data],
                                  predictions)[0]
    if return_all:
        return results, results_all
    return results
def get_matthews_corr(data, predictions):
    """Matthews correlation for the binary "acceptable" label (CoLA-style)."""
    # only cola is using this...?
    def binarize(label):
        return 1.0 if label == "acceptable" else 0.0
    pred_binary = [binarize(p.strip()) for p in predictions]
    gold_binary = [binarize(dp[1][0]) for dp in data]
    return matthews_corrcoef(gold_binary, pred_binary)
def qa_f1_score(prediction, ground_truth):
    """Token-level F1 between a predicted and a gold answer string."""
    pred_counts = Counter(prediction.split())
    gold_counts = Counter(ground_truth.split())
    # Multiset intersection: per-token overlap between prediction and gold.
    overlap = sum((pred_counts & gold_counts).values())
    if overlap == 0:
        return 0
    precision = overlap / sum(pred_counts.values())
    recall = overlap / sum(gold_counts.values())
    return (2 * precision * recall) / (precision + recall)
# def get_rouge_over_list(prediction, groundtruth):
# def remove_punc(text):
# exclude = set(string.punctuation)
# return ''.join(ch for ch in text if ch not in exclude)
# if len(remove_punc(prediction)) == 0:
# return 0.0 # during early stages, it might generate nothin?
# # print(prediction)
# rouge = Rouge()
# if type(groundtruth)==list:
# if len(groundtruth)==0:
# return 0
# return np.max([rouge.get_scores(prediction, gt, avg=True)["rouge-l"]["f"] for gt in groundtruth])
# return rouge.get_scores(prediction, groundtruth, avg=True)["rouge-l"]["f"]
def get_accruacy_over_list(prediction, groundtruth):
    """Accuracy of *prediction* against one gold answer, or the best over a list.

    (Function name keeps its historical "accruacy" typo — callers use it.)
    """
    if type(groundtruth) != list:
        return accuracy(prediction, groundtruth)
    if not groundtruth:
        return 0
    return np.max([accuracy(prediction, gt) for gt in groundtruth])
def get_f1_over_list(prediction, groundtruth):
    """Best token-F1 of *prediction* against any gold answer in *groundtruth*."""
    if not groundtruth:
        return 0
    prediction_norm = normalize_answer(prediction)
    scores = [qa_f1_score(prediction_norm, normalize_answer(gt)) for gt in groundtruth]
    return np.max(scores)
def get_exact_match_over_list(prediction, groundtruth):
    """True iff the normalized prediction exactly matches any normalized gold answer."""
    if not groundtruth:
        return 0
    prediction_norm = normalize_answer(prediction)
    matches = [prediction_norm == normalize_answer(gt) for gt in groundtruth]
    return np.max(matches)
def normalize_answer(s):
    """SQuAD-style answer normalization: lowercase, strip punctuation,
    drop the articles a/an/the, and collapse whitespace."""
    text = s.lower()
    punctuation = set(string.punctuation)
    text = ''.join(ch for ch in text if ch not in punctuation)
    text = re.sub(r'\b(a|an|the)\b', ' ', text)
    return ' '.join(text.split())
|
CMR-main
|
cmr/task_manager/eval_metrics.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
|
CMR-main
|
cmr/task_manager/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import os
import json
from .base_datamanager import MyQADataset, MyDataLoader
from .eval_metrics import METRICS, evaluate_func
import torch
import numpy as np
class GeneralDataset(object):
    def __init__(self, logger, args, data_path, data_type, is_training, task_name, given_data=None, data_dist=False, num_shards=-1, local_id=-1):
        """Load a jsonl dataset (or adopt *given_data*) as (input, outputs, id) tuples.

        data_path: jsonl file with "input"/"output"/"id" fields per line; ignored
            when given_data is supplied.
        data_dist + num_shards + local_id: when enabled, keep only this
            process's shard of the data.
        """
        # should give the tasks used in this split in the var "tasks"
        self.data_path = data_path
        self.data_type = data_type
        self.data = []
        self.task_name = task_name
        if given_data is not None:
            # Reuse already-loaded examples instead of reading from disk.
            self.data = given_data
        else:
            with open(data_path) as fin:
                lines = fin.readlines()
            # train_examples = []
            for line in lines:
                # d = line.strip().split("\t")
                # self.data.append((d[0], d[1:]))
                d = json.loads(line)
                self.data.append((d["input"], d["output"], d["id"]))
        self.is_training = is_training
        # self.load gates use of the preprocessed-tokenization cache; disabled in debug mode.
        self.load = not args.debug if hasattr(args, "debug") else True
        self.logger = logger
        self.args = args
        self.metric = METRICS[self.task_name]  # KeyError for unknown task names
        # self.max_input_length = self.args.max_input_length
        self.tokenizer = None
        self.dataset = None
        self.dataloader = None
        self.cache = None
        self.gen_early_stop = False
        if data_dist and local_id >= 0 and num_shards > 0:
            # num_shards = torch.distributed.get_world_size() # the number of gpus
            # local_shard_id = torch.distributed.get_rank() # the current process id
            self.logger.info(f'dataset_size={len(self.data)}, num_shards={num_shards}, local_shard_id={local_id}')
            # NOTE(review): np.array_split turns self.data into a numpy array of
            # tuples rather than a list — downstream code appears to tolerate
            # this, but confirm before relying on list-only operations.
            self.data = np.array_split(self.data, num_shards)[local_id]
            # # make it evenly divisible
            # indices = indices[:shard_size * num_shards]
            # assert len(indices) == shard_size * num_shards
            # # subsample
            # indices = indices[local_shard_id:len(indices):num_shards]
            # assert len(indices) == shard_size
            # indices = set(indices)
    def __len__(self):
        # Number of (input, outputs, id) examples currently held (after any sharding).
        return len(self.data)
def decode(self, tokens):
return self.tokenizer.decode(tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True)
def decode_batch(self, tokens):
return [self.decode(_tokens) for _tokens in tokens]
def flatten(self, answers):
new_answers, metadata = [], []
for answer in answers:
metadata.append((len(new_answers), len(new_answers)+len(answer)))
new_answers += answer
return new_answers, metadata
def load_dataset(self, tokenizer, do_return=False, skip_cache=False, quiet=False):
self.tokenizer = tokenizer
postfix = "prepro" + tokenizer.__class__.__name__.replace("zer", "zed")
inputs = []
outputs = []
uuids = []
for dp in self.data:
# Add the task name to the input
# inputs.append(" [{}] {}".format(self.task_name, dp[0]))
inputs.append(dp[0])
outputs.append(dp[1]) # is a list
uuids.append(dp[2])
if not skip_cache:
preprocessed_path = os.path.join(
"/".join(self.data_path.split("/")[:-1]),
self.data_path.split("/")[-1].replace(".jsonl", "-{}.json".format(postfix)))
self.logger.info(f"preprocessed_path={preprocessed_path}")
if not skip_cache and self.load and os.path.exists(preprocessed_path):
# load preprocessed input
self.logger.info(
"Loading pre-tokenized data from {}".format(preprocessed_path))
with open(preprocessed_path, "r") as f:
input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, \
metadata = json.load(f)
else:
if not quiet:
self.logger.info(
"Start tokenizing ... {} instances".format(len(self.data)))
if not quiet:
self.logger.info("Printing 3 examples")
for i in range(3):
self.logger.info(inputs[i])
self.logger.info(outputs[i])
outputs, metadata = self.flatten(outputs) # what is metadata?
# self.logger.info("Printing 3 examples's outputs and metadata after flattening")
# for i in range(3):
# self.logger.info(outputs[i])
# self.logger.info(metadata[i])
if self.args.do_lowercase:
inputs = [input0.lower() for input0 in inputs]
outputs = [output0.lower() for output0 in outputs]
if self.args.append_another_bos:
inputs = ["<s> "+input0 for input0 in inputs]
outputs = ["<s> " + output0 for output0 in outputs]
if not quiet:
self.logger.info("Tokenizing Input ...")
tokenized_input = tokenizer.batch_encode_plus(inputs,
pad_to_max_length=True,
max_length=self.args.max_input_length)
if not quiet:
self.logger.info("Tokenizing Input ... Done!")
self.logger.info("Tokenizing Output ...")
tokenized_output = tokenizer.batch_encode_plus(outputs,
pad_to_max_length=True,
max_length=self.args.max_output_length)
if not quiet:
self.logger.info("Tokenizing Output ... Done!")
input_ids, attention_mask = tokenized_input["input_ids"], tokenized_input["attention_mask"]
decoder_input_ids, decoder_attention_mask = tokenized_output[
"input_ids"], tokenized_output["attention_mask"]
if self.load and not skip_cache:
preprocessed_data = [input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
metadata]
self.logger.info("Save preprocessed data ...")
with open(preprocessed_path, "w") as f:
json.dump([input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
metadata], f)
self.logger.info("Save preprocessed data ... Done!")
# self.logger.info("len(input_ids): {}".format(len(input_ids)))
# self.logger.info("len(decoder_input_ids): {}".format(len(decoder_input_ids)))
# self.logger.info("len(attention_mask): {}".format(len(attention_mask)))
# self.logger.info("len(decoder_attention_mask): {}".format(len(decoder_attention_mask)))
assert len(uuids) == len(input_ids) # make sure
self.dataset = MyQADataset(input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
in_metadata=None, out_metadata=metadata,
is_training=self.is_training, uuids=uuids)
if not quiet:
self.logger.info("Loaded {} examples from {} data".format(
len(self.dataset), self.data_type))
if do_return:
return self.dataset
def load_dataloader(self, do_return=False, is_training="self"):
if is_training == "self":
is_training = self.is_training
self.dataloader = MyDataLoader(
self.args, self.dataset, is_training)
if do_return:
return self.dataloader
def evaluate(self, predictions, verbose=False):
assert len(predictions) == len(self), (len(predictions), len(self))
predictions = [prediction.strip() for prediction in predictions]
return evaluate_func(predictions, self.data, self.metric)
# ems = []
# for (prediction, dp) in zip(predictions, self.data):
# ems.append(get_exact_match(prediction.strip(), [dp[1]]))
# return np.mean(ems)
def save_predictions(self, predictions, path_to_save=None):
assert len(predictions) == len(self), (len(predictions), len(self))
predictions = ['n/a' if len(prediction.strip()) ==
0 else prediction for prediction in predictions]
prediction_text = [
prediction.strip()+'\n' for prediction in predictions]
if path_to_save:
save_path = path_to_save
else:
save_path = os.path.join(
self.args.output_dir, "{}_predictions.txt".format(self.args.prefix))
with open(save_path, "w") as f:
f.writelines(prediction_text)
self.logger.info("Saved prediction in {}".format(save_path))
|
CMR-main
|
cmr/task_manager/dataloader.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
class MyQADataset(Dataset):
    """Seq2seq QA dataset.

    `in_metadata` / `out_metadata` are lists of (start, end) ranges mapping
    each logical example to its candidate inputs/answers; when an example has
    several answers, training picks one at random.
    """

    def __init__(self,
                 input_ids, attention_mask,
                 decoder_input_ids, decoder_attention_mask,
                 in_metadata=None, out_metadata=None,
                 is_training=False, uuids=None, seed=42):
        self.uuids = uuids
        self.input_ids = torch.LongTensor(input_ids)
        self.attention_mask = torch.LongTensor(attention_mask)
        self.decoder_input_ids = torch.LongTensor(decoder_input_ids)
        self.decoder_attention_mask = torch.LongTensor(decoder_attention_mask)
        # Default metadata is the identity mapping: one row per example.
        if in_metadata is None:
            n_in = len(input_ids)
            in_metadata = list(zip(range(n_in), range(1, 1 + n_in)))
        if out_metadata is None:
            n_out = len(decoder_input_ids)
            out_metadata = list(zip(range(n_out), range(1, 1 + n_out)))
        self.in_metadata = in_metadata
        self.out_metadata = out_metadata
        self.is_training = is_training
        # Last range must end exactly at the number of tokenized rows.
        assert len(self.input_ids) == len(
            self.attention_mask) == self.in_metadata[-1][-1]
        assert len(self.decoder_input_ids) == len(
            self.decoder_attention_mask) == self.out_metadata[-1][-1]
        np.random.seed(seed)  # for selecting the same answer if there are multiple

    def __len__(self):
        return len(self.in_metadata)

    def __getitem__(self, idx):
        if self.is_training:
            # Sample one input row and one answer row from this example's ranges.
            in_idx = np.random.choice(range(*self.in_metadata[idx]))
            out_idx = np.random.choice(range(*self.out_metadata[idx]))
            # TODO: can we pass the self.uuids[in_idx] ?
            return (self.input_ids[in_idx], self.attention_mask[in_idx],
                    self.decoder_input_ids[out_idx], self.decoder_attention_mask[out_idx])
        # Eval: deterministically use the first row of the range.
        eval_idx = self.in_metadata[idx][0]
        return self.input_ids[eval_idx], self.attention_mask[eval_idx]
class MyDataLoader(DataLoader):
    """DataLoader configured from `args`: random order + train batch size when
    training, sequential order + predict batch size otherwise."""

    def __init__(self, args, dataset, is_training):
        if is_training:
            chosen_sampler = RandomSampler(dataset)
            chosen_bsz = args.train_batch_size
        else:
            chosen_sampler = SequentialSampler(dataset)
            chosen_bsz = args.predict_batch_size
        super(MyDataLoader, self).__init__(
            dataset, sampler=chosen_sampler, batch_size=chosen_bsz)
|
CMR-main
|
cmr/task_manager/base_datamanager.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import torch
import torch.nn as nn
from transformers import BartModel, RobertaModel
from transformers.activations import ACT2FN
from typing import List
def Linear(in_features, out_features, bias=True):
    """Build an nn.Linear whose weights start near zero (xavier gain 1e-7)
    and whose bias, when present, starts at exactly zero."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight, gain=0.0000001)
    if bias:
        nn.init.constant_(layer.bias, 0.0)
    return layer
# def RegularLinear(in_features, out_features, bias=True):
# m = nn.Linear(in_features, out_features, bias)
# nn.init.xavier_uniform_(m.weight, gain=1)
# if bias:
# nn.init.constant_(m.bias, 0.0)
# return m
# def HNetLinear(config, in_features, out_features, input_dim, output_dim, bias=True):
# var_e = 2 / (config.task_emb_dim + config.long_term_task_emb_num)
# weight_var_fanin = 1 / (2 * in_features * input_dim * var_e)
# weight_var_fanout = 1 / (in_features * output_dim * var_e)
# bias_var_fanin = 1 / (2 * config.task_emb_dim * var_e)
# bias_var_fanout = max((1 - (input_dim / output_dim)) / (config.task_emb_dim * var_e), 1e-10)
# weight_var = 2 / (1 / weight_var_fanin + 1 / weight_var_fanout)
# bias_var = 2 / (1 / bias_var_fanin + 1 / bias_var_fanout)
# m = nn.Linear(in_features, out_features, bias)
# nn.init.normal_(m.weight, 0, weight_var ** 0.5)
# if bias:
# nn.init.normal_(m.bias, 0, bias_var ** 0.5)
# return m
class MLP_Task2Adapter(nn.Module):
    """Two-layer MLP mapping an encoded task description to the flattened
    parameters of one adapter (down/up projection weights and biases, plus
    optional layer-norm offsets)."""

    def __init__(self, config):
        super().__init__()
        self.input_dim = config.task_emb_dim  # 768?
        self.hidden_dim = config.generator_hdim
        # TODO: set this output_dim = # params of adapters automatically.
        d_model, d_adapter = config.d_model, config.adapter_dim
        # down weight + up weight + up bias + down bias
        param_count = d_model * d_adapter * 2 + d_model + d_adapter
        if config.adapt_layer_norm:
            param_count += 2 * d_model  # layer-norm weight & bias deltas
        self.output_dim = param_count
        self.linear1 = Linear(self.input_dim, self.hidden_dim)
        self.activation_fn = ACT2FN[config.activation_function]
        self.linear2 = Linear(self.hidden_dim, self.output_dim)

    def forward(self, x):
        hidden = self.activation_fn(self.linear1(x))
        return self.linear2(hidden).view(-1)
class ParameterGenerator(nn.Module):
    """Holds one MLP_Task2Adapter per encoder/decoder layer and decodes a task
    embedding into per-layer adapter parameter vectors (optionally concatenated
    into a single flat tensor)."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        total_layers = config.encoder_layers + config.decoder_layers  # int
        self.decoders = nn.ModuleList(
            [MLP_Task2Adapter(config) for _ in range(total_layers)])

    def decode(self, task_emb):
        """One parameter vector per layer."""
        return [generator(task_emb) for generator in self.decoders]

    def forward(self, task_embedding, concat=False):
        adapter_params = self.decode(task_embedding)
        return torch.cat(adapter_params) if concat else adapter_params
# class GrowingBart(nn.Module):
# def __init__(self, model, meta_model, config):
# super().__init__()
# self.config = config
# self.model = model
# self.meta_model = meta_model
# def set_relation(self, rel_ids, rel_masks):
# # generate adapter parameters using task descriptions
# generated_params = self.meta_model(rel_ids, attention_mask=rel_masks)
# # apply the parameters to the adapters
# self.apply_params_to_adapters(generated_params)
# def forward(self, rel_ids, rel_masks, input_ids, input_masks, output_ids, output_masks, is_training=False):
# # generate adapter parameters using task descriptions
# generated_params = self.meta_model(rel_ids, attention_mask=rel_masks)
# # apply the parameters to the adapters
# self.apply_params_to_adapters(generated_params)
# # use the adapted model to make zero-shot inference
# ret = self.model(input_ids, attention_mask=input_masks,
# decoder_input_ids=output_ids,
# decoder_attention_mask=output_masks,
# is_training=is_training
# )
# return ret
# def apply_params_to_adapters(self, generated_params):
# encoder_params, decoder_params = generated_params[:self.config.encoder_layers], generated_params[self.config.encoder_layers:]
# d_model = self.config.d_model
# d_adapter = self.config.adapter_dim
# for p, encoder_layer in zip(encoder_params, self.model.encoders()):
# # dw, db: down weight, down bias
# # uw, ub: up weight, up bias
# dw, uw, db, ub = p[0:d_model*d_adapter], \
# p[d_model*d_adapter:d_model*d_adapter*2], \
# p[d_model*d_adapter*2:d_model*d_adapter*2+d_adapter], \
# p[d_model*d_adapter*2+d_adapter:d_model*d_adapter*2+d_adapter+d_model]
# encoder_layer.adapter_down_weight = dw.view(d_model, d_adapter)
# encoder_layer.adapter_down_bias = db.view(d_adapter)
# encoder_layer.adapter_up_weight = uw.view(d_adapter, d_model)
# encoder_layer.adapter_up_bias = ub.view(d_model)
# if self.config.adapt_layer_norm:
# encoder_layer.self_attn_layer_norm.weight.data = encoder_layer.self_attn_layer_norm.weight.data + p[-2*d_model: -1*d_model]
# encoder_layer.self_attn_layer_norm.bias.data = encoder_layer.self_attn_layer_norm.bias.data + p[-1*d_model:]
# for p, decoder_layer in zip(decoder_params, self.model.decoders()):
# dw, uw, db, ub = p[0:d_model*d_adapter], \
# p[d_model*d_adapter:d_model*d_adapter*2], \
# p[d_model*d_adapter*2:d_model*d_adapter*2+d_adapter], \
# p[d_model*d_adapter*2+d_adapter:d_model*d_adapter*2+d_adapter+d_model]
# decoder_layer.adapter_down_weight = dw.view(d_model, d_adapter)
# decoder_layer.adapter_down_bias = db.view(d_adapter)
# decoder_layer.adapter_up_weight = uw.view(d_adapter, d_model)
# decoder_layer.adapter_up_bias = ub.view(d_model)
# if self.config.adapt_layer_norm:
# decoder_layer.self_attn_layer_norm.weight.data = decoder_layer.self_attn_layer_norm.weight.data + p[-2*d_model: -1*d_model]
# decoder_layer.self_attn_layer_norm.bias.data = decoder_layer.self_attn_layer_norm.bias.data + p[-1*d_model:]
# # a = self.model.decoders()[-4]
# # print(a.adapter_down_weight)
# # print(a.adapter_down_bias)
# # print(a.adapter_up_weight)
# # print(a.adapter_up_bias)
|
CMR-main
|
cmr/models/hypernet.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
|
CMR-main
|
cmr/models/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This script was based on https://github.com/shmsw25/bart-closed-book-qa.
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from transformers import T5ForConditionalGeneration, BartForConditionalGeneration
from transformers.modeling_bart import shift_tokens_right
from .utils import label_smoothed_nll_loss
class MyBart(BartForConditionalGeneration):
    """BartForConditionalGeneration whose forward returns a label-smoothed NLL
    loss when `is_training` is set, and `(lm_logits, *extras)` otherwise."""

    def forward(self, input_ids, attention_mask=None, encoder_outputs=None,
                decoder_input_ids=None, decoder_attention_mask=None, decoder_cached_states=None,
                use_cache=False, is_training=False, return_all_loss=False):
        # Teacher forcing: feed the gold output shifted right by one position.
        if is_training:
            _decoder_input_ids = shift_tokens_right(
                decoder_input_ids, self.config.pad_token_id)
        else:
            _decoder_input_ids = decoder_input_ids
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            encoder_outputs=encoder_outputs,
            decoder_input_ids=_decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_cached_states=decoder_cached_states,
            use_cache=use_cache,
        )
        # Output projection is tied to the shared embedding matrix.
        lm_logits = F.linear(
            outputs[0], self.model.shared.weight, bias=self.final_logits_bias)
        if not is_training:
            return (lm_logits, ) + outputs[1:]
        lprobs = F.log_softmax(lm_logits, dim=-1)
        loss, _nll = label_smoothed_nll_loss(
            lprobs, decoder_input_ids, epsilon=0.1,
            ignore_index=self.config.pad_token_id, return_all_loss=return_all_loss)
        return loss
|
CMR-main
|
cmr/models/mybart.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This script was based on https://github.com/shmsw25/bart-closed-book-qa.
import os
import numpy as np
import torch
from transformers import BartTokenizer, BartConfig
from transformers import AdamW, get_linear_schedule_with_warmup
from cmr.task_manager.dataloader import GeneralDataset
from .mybart import MyBart
from .utils import freeze_embeds, trim_batch, convert_model_to_single_gpu
import json
from tqdm import tqdm
import copy
def run(args, logger):
    """Train and/or evaluate a BART model on a GeneralDataset task.

    Args:
        args: namespace with file paths, training hyper-parameters and the
            do_train / do_predict flags.
        logger: standard logger used for progress reporting.

    Returns:
        (best_dev_performance, test_performance) — either may be None if the
        corresponding phase was not run.
    """
    tokenizer = BartTokenizer.from_pretrained("bart-large")
    train_data = GeneralDataset(logger, args, args.train_file,
                                data_type="train", is_training=True, task_name=args.dataset)
    dev_data = GeneralDataset(logger, args, args.dev_file,
                              data_type="dev", is_training=False, task_name=args.dataset)
    train_data.load_dataset(tokenizer)
    train_data.load_dataloader()
    dev_data.load_dataset(tokenizer)
    dev_data.load_dataloader()
    best_dev_performance = None
    test_performance = None
    best_model_state_dict = None
    if args.do_train:
        # Optionally warm-start from a checkpoint (which may have been saved
        # by DataParallel, hence the "module." prefix stripping).
        if args.checkpoint is not None and args.checkpoint != "None":
            logger.info(f"Loading checkpoint: {args.checkpoint}")
            model = MyBart.from_pretrained(args.model,
                                           state_dict=convert_model_to_single_gpu(torch.load(args.checkpoint)))
        else:
            model = MyBart.from_pretrained(args.model)
        if args.freeze_embeds:
            logger.info("Freezing embeddings")
            freeze_embeds(model)
        if args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        if torch.cuda.is_available():
            model.to(torch.device("cuda"))
        # Standard trick: no weight decay on biases and LayerNorm weights.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(
                nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
            {'params': [p for n, p in model.named_parameters() if any(
                nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        args.total_steps = args.num_train_epochs * len(train_data.dataloader)
        logger.info(f"args.total_steps = {args.total_steps}")
        optimizer = AdamW(optimizer_grouped_parameters,
                          lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(optimizer,
                                                    num_warmup_steps=args.warmup_steps,
                                                    num_training_steps=args.total_steps)
        best_dev_performance, best_model_state_dict = train(
            args, logger, model, train_data, dev_data, optimizer, scheduler)
    if args.do_predict:
        if args.do_train and best_model_state_dict is not None:
            model = MyBart.from_pretrained(args.model,
                                           state_dict=best_model_state_dict)
            logger.info("Loading checkpoint from CPU")
        else:
            checkpoint = os.path.join(args.predict_checkpoint)
            model = MyBart.from_pretrained(args.model,
                                           state_dict=convert_model_to_single_gpu(torch.load(checkpoint)))
            logger.info("Loading checkpoint from {}".format(checkpoint))
        if torch.cuda.is_available():
            model.to(torch.device("cuda"))
        model.eval()
        data_type = "test" if "test" in args.test_file else "dev"
        test_data = GeneralDataset(
            logger, args, args.test_file, data_type=data_type, is_training=False, task_name=args.dataset)
        test_data.load_dataset(tokenizer)
        test_data.load_dataloader()
        test_performance = inference(
            model, test_data, save_predictions=True, verbose=True, args=args, logger=logger)
        # BUGFIX: was "%.s" — %-formatting treats a bare "." as precision 0,
        # so the performance string was always rendered as "".
        logger.info("%s on %s data: %s" % (test_data.metric,
                                           test_data.data_type, str(test_performance)))
    return best_dev_performance, test_performance
def train(args, logger, model, train_data, dev_data, optimizer, scheduler):
    """Training loop with periodic dev evaluation and patience-based stopping.

    Evaluates every `args.eval_period` steps; keeps (and saves to disk) the
    state dict of the best model seen so far, and stops after `args.wait_step`
    consecutive non-improving evaluations or `args.total_steps` steps.

    Returns:
        (best_performance, best_model_state_dict) — both None if no evaluation
        ever ran or nothing improved.
    """
    model.train()
    global_step = 0
    train_losses = []
    best_performance = None
    # BUGFIX: both of these were previously only assigned inside the eval
    # branch, so finishing training before any evaluation (or before any
    # non-improving eval) raised UnboundLocalError on return / `wait_step += 1`.
    best_model_state_dict = None
    wait_step = 0
    stop_training = False
    logger.info("Starting training!")
    for epoch in range(int(args.num_train_epochs)):
        for batch in tqdm(train_data.dataloader, desc="Epoch {}".format(epoch), disable=args.quiet):
            global_step += 1
            if torch.cuda.is_available():
                batch = [b.to(torch.device("cuda")) for b in batch]
            pad_token_id = train_data.tokenizer.pad_token_id
            # Drop all-pad columns to save compute.
            batch[0], batch[1] = trim_batch(batch[0], pad_token_id, batch[1])
            batch[2], batch[3] = trim_batch(batch[2], pad_token_id, batch[3])
            loss = model(input_ids=batch[0], attention_mask=batch[1],
                         decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
                         is_training=True)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu.
            if torch.isnan(loss).data:
                logger.info("Stop training because loss=%s" % (loss.data))
                stop_training = True
                break
            train_losses.append(loss.detach().cpu())
            loss.backward()
            if global_step % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(
                    model.parameters(), args.max_grad_norm)
                optimizer.step()  # We have accumulated enough gradients
                scheduler.step()
                model.zero_grad()
            if global_step % args.eval_period == 0:
                model.eval()
                curr_performance = inference(
                    model if args.n_gpu == 1 else model.module, dev_data, args=args, save_predictions=True, logger=logger)
                # TODO: save predictions when eval during training
                logger.info("Step %d Train loss %.2f %s %s on epoch=%d" % (
                    global_step,
                    np.mean(train_losses),
                    dev_data.metric,
                    curr_performance,
                    epoch))
                train_losses = []
                def is_improved(best, curr):
                    # "Improved" = any tracked metric got strictly better.
                    if best is None:
                        return True
                    return any([best[m] < curr[m] for m in best])
                if is_improved(best_performance, curr_performance):
                    best_model_state_dict = {k: v.cpu() for (
                        k, v) in model.state_dict().items()}
                    # save results  (BUGFIX: fixed "perfromance" typo in message)
                    logger.info("New best performance %s: %s -> %s on epoch=%d, global_step=%d" %
                                (dev_data.metric, best_performance, curr_performance, epoch, global_step))
                    best_model_path = os.path.join(
                        args.output_dir, "best-model.pt")
                    with open(best_model_path.replace(".pt", "_results.json"), "w") as f:
                        json.dump(curr_performance, f)
                    logger.info(
                        "Saving the new best model to {}".format(best_model_path))
                    torch.save(best_model_state_dict, best_model_path)
                    best_performance = curr_performance
                    wait_step = 0
                    stop_training = False
                else:
                    wait_step += 1
                    if wait_step >= args.wait_step:
                        stop_training = True
                        break
                model.train()
            if global_step >= args.total_steps:
                stop_training = True
                break
        if stop_training:
            break
    # model_state_dict = {k:v.cpu() for (k, v) in model.state_dict().items()}
    # torch.save(model_state_dict, os.path.join(args.output_dir, "last-model.pt"))
    return best_performance, best_model_state_dict
def inference(model, dev_data, save_predictions=False, verbose=False, args=None, logger=None, return_all=False, predictions_only=False, compute_loss=False, loss_only=False):
    """Run generation (and optionally per-sentence loss computation) on dev_data.

    Returns, depending on flags (checked in this order):
      - loss_only:        list of per-sentence normalized losses;
      - predictions_only: list of decoded prediction strings;
      - return_all:       (predictions, metric_result, losses);
      - otherwise:        the metric result from dev_data.evaluate(...).
    """
    model.eval()
    predictions = []
    bos_token_id = dev_data.tokenizer.bos_token_id
    losses = []  # if needed
    # args.quiet, when available, overrides the `verbose` flag.
    if args and hasattr(args, "quiet"):
        quiet = args.quiet
    else:
        quiet = not verbose
    if not quiet:
        logger.info("Starting inference ...")
    # NOTE: "Infernece" is a typo, but it only affects the progress-bar label.
    for batch in tqdm(dev_data.dataloader, desc="Infernece", disable=quiet):
        if torch.cuda.is_available():
            batch = [b.to(torch.device("cuda")) for b in batch]
        pad_token_id = dev_data.tokenizer.pad_token_id
        batch[0], batch[1] = trim_batch(batch[0], pad_token_id, batch[1])
        if compute_loss:
            # to compute loss
            batch[2], batch[3] = trim_batch(batch[2], pad_token_id, batch[3])
            loss = model(input_ids=batch[0], attention_mask=batch[1],
                         decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
                         is_training=True, return_all_loss=True)
            # TODO: double check this part. are the results correct?
            # TODO: do we need to use mean?
            # logger.info(loss.shape)
            loss = loss.squeeze(-1)
            # logger.info(loss.shape)
            loss = loss.detach().cpu()
            # logger.info(f"torch.sum(loss.squeeze(-1), 1) = {torch.sum(loss.squeeze(-1), 1)}")
            for each_loss in loss:
                # Average over non-pad positions only (pad losses are zeroed out).
                num_nonzeros = (each_loss!=0).sum(0)
                norm_loss = each_loss.sum()/ num_nonzeros
                # add the normalized loss for each sentence.
                losses.append(norm_loss)
            if return_all:
                pass
        if not loss_only:
            outputs = model.generate(input_ids=batch[0],
                                     attention_mask=batch[1],
                                     num_beams=dev_data.args.num_beams,
                                     max_length=dev_data.args.max_output_length,
                                     decoder_start_token_id=model.config.bos_token_id,
                                     early_stopping=dev_data.gen_early_stop,)
            for input_, output in zip(batch[0], outputs):
                pred = dev_data.decode(output)
                predictions.append(pred)
    if not quiet:
        logger.info("Starting inference ... Done")
    if loss_only:
        return losses
    if predictions_only:
        return predictions
    if save_predictions:
        dev_data.save_predictions(predictions, )
    # logger.info("Starting evaluation metric ...")
    result = dev_data.evaluate(predictions, verbose=verbose)
    # logger.info("Starting evaluation metric ... Done!")
    if return_all:
        return predictions, result, losses
    return result
|
CMR-main
|
cmr/models/run_bart.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This script was based on https://github.com/shmsw25/bart-closed-book-qa.
import copy
import torch.nn as nn
import random
import numpy as np
import torch
def set_seeds(seed):
    """Seed the Python, NumPy and PyTorch RNGs for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
def convert_model_to_single_gpu(state_dict):
    """Strip the 'module.' prefix that DataParallel prepends to parameter
    names, so the checkpoint loads into a single-GPU model."""
    prefix = 'module.'
    return {
        (key[len(prefix):] if key.startswith(prefix) else key): value
        for key, value in state_dict.items()
    }
def label_smoothed_nll_loss(lprobs, target, epsilon=0.1, ignore_index=-100, return_all_loss=False):
    """Label-smoothed NLL loss (fairseq-style).

    Args:
        lprobs: log-probabilities, shape (..., vocab).
        target: gold token ids, shape matching lprobs without the vocab dim.
        epsilon: smoothing mass spread uniformly over the vocabulary.
        ignore_index: positions equal to this id contribute zero loss.
        return_all_loss: if False, losses are summed to scalars; otherwise
            the per-position losses are returned.

    Returns:
        (smoothed_loss, nll_loss).
    """
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    per_token_nll = -lprobs.gather(dim=-1, index=target)
    per_token_smooth = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is None:
        per_token_nll = per_token_nll.squeeze(-1)
        per_token_smooth = per_token_smooth.squeeze(-1)
    else:
        # Zero out padded positions.
        pad_positions = target.eq(ignore_index)
        per_token_nll = per_token_nll.masked_fill(pad_positions, 0.0)
        per_token_smooth = per_token_smooth.masked_fill(pad_positions, 0.0)
    if not return_all_loss:
        per_token_nll = per_token_nll.sum()
        per_token_smooth = per_token_smooth.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * per_token_nll + eps_i * per_token_smooth
    return loss, per_token_nll
def freeze_params(model: nn.Module):
    """Set requires_grad=False for each of model.parameters()"""
    for parameter in model.parameters():
        parameter.requires_grad = False
def freeze_embeds(model):
    """Freeze token embeddings and positional embeddings for bart, just token
    embeddings for t5 (dispatch on model.config.model_type)."""
    model_type = model.config.model_type
    if model_type == "t5":
        freeze_params(model.shared)
        for half in (model.encoder, model.decoder):
            freeze_params(half.embed_tokens)
    elif model_type == "fsmt":
        for half in (model.model.encoder, model.model.decoder):
            freeze_params(half.embed_positions)
            freeze_params(half.embed_tokens)
    else:
        # BART-style layout: embeddings live under model.model.
        freeze_params(model.model.shared)
        for half in (model.model.encoder, model.model.decoder):
            freeze_params(half.embed_positions)
            freeze_params(half.embed_tokens)
def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep = input_ids.ne(pad_token_id).any(dim=0)
    trimmed_ids = input_ids[:, keep]
    if attention_mask is None:
        return trimmed_ids
    return (trimmed_ids, attention_mask[:, keep])
|
CMR-main
|
cmr/models/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from transformers.modeling_bart import EncoderLayer, DecoderLayer, BartEncoder, BartDecoder, BartModel, BartForConditionalGeneration
from transformers.modeling_bart import shift_tokens_right
from transformers.configuration_bart import BartConfig
from transformers.configuration_utils import PretrainedConfig
from .utils import label_smoothed_nll_loss
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
class BartWithAdapterConfig(BartConfig):
    """BartConfig extended with adapter/hypernetwork settings: `adapter_dim`
    (bottleneck width), `adapt_layer_norm` (whether layer-norm offsets are also
    generated), `generator_hdim` (hypernet hidden width, derived from d_model),
    and `unfreeze_hyper_encoder`."""
    def __init__(
        self,
        activation_dropout=0.0,
        activation_function="gelu",
        vocab_size=50265,
        d_model=1024,
        encoder_ffn_dim=4096,
        encoder_layers=12,
        encoder_attention_heads=16,
        decoder_ffn_dim=4096,
        decoder_layers=12,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        attention_dropout=0.0,
        dropout=0.1,
        max_position_embeddings=1024,
        init_std=0.02,
        classifier_dropout=0.0,
        num_labels=3,
        is_encoder_decoder=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        normalize_before=False,
        add_final_layer_norm=False,
        scale_embedding=False,
        normalize_embedding=True,
        static_position_embeddings=False,
        add_bias_logits=False,
        adapter_dim=64,
        adapt_layer_norm=False,
        unfreeze_hyper_encoder=False,
        **common_kwargs
    ):
        # Guard against the HF-generic name; BART calls it d_model.
        if "hidden_size" in common_kwargs:
            raise ValueError("hidden size is called d_model")
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **common_kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model  # encoder_embed_dim and decoder_embed_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = self.num_hidden_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # Params introduced for Mbart
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.normalize_embedding = normalize_embedding  # True for mbart, False otherwise
        self.normalize_before = normalize_before  # combo of fairseq's encoder_ and decoder_normalize_before
        self.add_final_layer_norm = add_final_layer_norm
        # Params introduced for Marian
        self.add_bias_logits = add_bias_logits
        self.static_position_embeddings = static_position_embeddings
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        # Classifier stuff
        self.classif_dropout = classifier_dropout
        # Adapter
        self.adapter_dim = adapter_dim
        # Hypernet
        self.generator_hdim = int(self.d_model * 0.25)  # TODO: make it a tunable hp.
        self.adapt_layer_norm = adapt_layer_norm
        self.unfreeze_hyper_encoder = unfreeze_hyper_encoder  # TODO: should be
def Linear(in_features, out_features, bias=True, std=0.0000001):
    """nn.Linear with xavier-uniform weights scaled by gain *std* (near-zero
    by default, so adapters start as approximate identities) and zero bias."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight, gain=std)
    if bias:
        nn.init.constant_(layer.bias, 0.0)
    return layer
class EncoderLayerWithAdapter(EncoderLayer):
    """BART encoder layer with a bottleneck adapter (down-project, activation,
    up-project, residual) inserted after the self-attention sublayer."""
    def __init__(self, config: BartConfig):
        super(EncoderLayerWithAdapter, self).__init__(config)
        self.adapter_dim = config.adapter_dim
        # self.adapter_down_weight = torch.zeros(self.embed_dim, self.adapter_dim)
        # self.adapter_down_bias = torch.zeros(self.adapter_dim)
        # self.adapter_up_weight = torch.zeros(self.adapter_dim, self.embed_dim)
        # self.adapter_up_bias = torch.zeros(self.embed_dim)
        # NOTE(review): `config.init_std` lands in Linear's third parameter,
        # which is `bias`, not `std` — so the adapter weights keep the default
        # near-zero gain and the layers always get a bias. It looks like
        # `std=config.init_std` was intended; confirm before changing, since
        # near-zero adapter init may be deliberate.
        self.adapter_down_layer = Linear(self.embed_dim, self.adapter_dim, config.init_std)
        self.adapter_up_layer = Linear(self.adapter_dim, self.embed_dim, config.init_std)
    def adapter_down(self, x):
        # print(x.size())
        # print(self.adapter_down_weight.size())
        # z = x * self.adapter_down_weight
        # print(z.size())
        # return F.linear(x, self.adapter_down_weight.t(), self.adapter_down_bias)
        # return x * self.adapter_down_weight + self.adapter_down_bias
        return self.adapter_down_layer(x)
    def adapter_up(self, x):
        # return F.linear(x, self.adapter_up_weight.t(), self.adapter_up_bias)
        # return x * self.adapter_up_weight + self.adapter_up_bias
        return self.adapter_up_layer(x)
    def forward(self, x, encoder_padding_mask):
        """Self-attention -> adapter (with its own residual) -> residual ->
        feed-forward, honoring pre-/post-layer-norm via `normalize_before`."""
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        x, attn_weights = self.self_attn(
            query=x, key=x, key_padding_mask=encoder_padding_mask, need_weights=self.output_attentions
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        # Adapter bottleneck with its own inner residual connection.
        residual_adapter = x
        x = self.adapter_down(x)
        x = self.activation_fn(x)
        x = self.adapter_up(x)
        x = residual_adapter + x
        x = residual + x
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)
        # Feed-forward sublayer (unchanged from the base EncoderLayer).
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        if not self.normalize_before:
            x = self.final_layer_norm(x)
        return x, attn_weights
class DecoderLayerWithAdapter(DecoderLayer):
    """BART decoder layer with a bottleneck adapter inserted after the
    self-attention sublayer (cross-attention and FFN are unchanged)."""
    def __init__(self, config: BartConfig):
        super(DecoderLayerWithAdapter, self).__init__(config)
        self.adapter_dim = config.adapter_dim
        # self.adapter_down_weight = torch.zeros(self.embed_dim, self.adapter_dim)
        # self.adapter_down_bias = torch.zeros(self.adapter_dim)
        # self.adapter_up_weight = torch.zeros(self.adapter_dim, self.embed_dim)
        # self.adapter_up_bias = torch.zeros(self.embed_dim)
        # NOTE(review): as in EncoderLayerWithAdapter, `config.init_std` is
        # passed positionally into Linear's `bias` parameter rather than `std`;
        # probably `std=config.init_std` was intended — confirm before changing.
        self.adapter_down_layer = Linear(self.embed_dim, self.adapter_dim, config.init_std)
        self.adapter_up_layer = Linear(self.adapter_dim, self.embed_dim, config.init_std)
    def adapter_down(self, x):
        # return F.linear(x, self.adapter_down_weight.t(), self.adapter_down_bias)
        return self.adapter_down_layer(x)
    def adapter_up(self, x):
        # return F.linear(x, self.adapter_up_weight.t(), self.adapter_up_bias)
        return self.adapter_up_layer(x)
    def forward(
        self,
        x,
        encoder_hidden_states,
        encoder_attn_mask=None,
        layer_state=None,
        causal_mask=None,
        decoder_padding_mask=None,
    ):
        """Self-attention -> adapter (inner residual) -> cross-attention ->
        feed-forward; `layer_state` carries the decoding cache."""
        residual = x
        if layer_state is None:
            layer_state = {}
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        # Self Attention
        x, self_attn_weights = self.self_attn(
            query=x,
            key=x,
            layer_state=layer_state,  # adds keys to layer state
            key_padding_mask=decoder_padding_mask,
            attn_mask=causal_mask,
            need_weights=self.output_attentions,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        # Adapter bottleneck with its own inner residual connection.
        residual_adapter = x
        x = self.adapter_down(x)
        x = self.activation_fn(x)
        x = self.adapter_up(x)
        x = residual_adapter + x
        x = residual + x
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)
        # Cross attention
        residual = x
        assert self.encoder_attn.cache_key != self.self_attn.cache_key
        if self.normalize_before:
            x = self.encoder_attn_layer_norm(x)
        x, _ = self.encoder_attn(
            query=x,
            key=encoder_hidden_states,
            key_padding_mask=encoder_attn_mask,
            layer_state=layer_state,  # mutates layer state
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        if not self.normalize_before:
            x = self.encoder_attn_layer_norm(x)
        # Fully Connected
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        if not self.normalize_before:
            x = self.final_layer_norm(x)
        return (
            x,
            self_attn_weights,
            layer_state,
        )  # just self_attn weights for now, following t5, layer_state = cache for decoding
class BartEncodeWithAdapter(BartEncoder):
    """BART encoder whose transformer layers carry adapter bottlenecks."""

    def __init__(self, config: BartConfig, embed_tokens):
        super().__init__(config, embed_tokens)
        # Swap the stock encoder layers for adapter-augmented ones.
        adapter_layers = [EncoderLayerWithAdapter(config)
                         for _ in range(config.encoder_layers)]
        self.layers = nn.ModuleList(adapter_layers)
class BartDecoderWithAdapter(BartDecoder):
    """BART decoder whose transformer layers carry adapter bottlenecks."""

    def __init__(self, config: BartConfig, embed_tokens: nn.Embedding):
        super().__init__(config, embed_tokens)
        # Swap the stock decoder layers for adapter-augmented ones.
        adapter_layers = [DecoderLayerWithAdapter(config)
                         for _ in range(config.decoder_layers)]
        self.layers = nn.ModuleList(adapter_layers)
class BartModelWithAdapter(BartModel):
    """BartModel variant that plugs in the adapter encoder/decoder pair."""

    def __init__(self, config: BartConfig):
        super().__init__(config)
        # Both sides share the same token embedding table (self.shared).
        self.encoder = BartEncodeWithAdapter(config, self.shared)
        self.decoder = BartDecoderWithAdapter(config, self.shared)
class BartForConditionalGenerationWithAdapter(BartForConditionalGeneration):
    """Conditional-generation head on top of the adapter-augmented BartModel."""

    def __init__(self, config: BartConfig):
        super().__init__(config)
        self.model = BartModelWithAdapter(config)
        # Bias added to the LM logits; registered as a buffer so it moves with
        # the module but is not a trainable parameter.
        self.register_buffer(
            "final_logits_bias",
            torch.zeros((1, self.model.shared.num_embeddings)))
class MyBartWithAdapter(BartForConditionalGenerationWithAdapter):
    """Adapter-augmented BART with a training-mode forward that returns a
    label-smoothed NLL loss, plus helpers to snapshot/restore layer norms."""

    def forward(self, input_ids, attention_mask=None, encoder_outputs=None,
                decoder_input_ids=None, decoder_attention_mask=None, decoder_cached_states=None,
                use_cache=False, is_training=False):
        """Run the seq2seq model.

        When ``is_training`` is True, returns the label-smoothed loss against
        ``decoder_input_ids``; otherwise returns ``(lm_logits, *rest)``.
        """
        if is_training:
            # Teacher forcing: shift targets right to build decoder inputs.
            _decoder_input_ids = shift_tokens_right(decoder_input_ids, self.config.pad_token_id)
        else:
            _decoder_input_ids = decoder_input_ids
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            encoder_outputs=encoder_outputs,
            decoder_input_ids=_decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_cached_states=decoder_cached_states,
            use_cache=use_cache,
        )
        # Tie LM head to the shared embedding matrix, plus the logits bias.
        lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)
        if is_training:
            # loss_fct = nn.CrossEntropyLoss(reduction="mean", ignore_index=self.config.pad_token_id)
            # loss = loss_fct(lm_logits.view(-1, self.config.vocab_size),
            #                 decoder_input_ids.view(-1))
            lprobs = F.log_softmax(lm_logits, dim=-1)
            # NOTE(review): the un-shifted decoder_input_ids serve as targets
            # here — presumably intentional given the shift above; confirm
            # against label_smoothed_nll_loss's expected convention.
            loss, _ = label_smoothed_nll_loss(lprobs, decoder_input_ids, epsilon=0.1, ignore_index=self.config.pad_token_id)
            return loss
        return (lm_logits, ) + outputs[1:]

    def encoders(self):
        """Return the list of (adapter-augmented) encoder layers."""
        return self.model.encoder.layers

    def decoders(self):
        """Return the list of (adapter-augmented) decoder layers."""
        return self.model.decoder.layers

    def backup_layer_norm_parameters(self):
        """Snapshot each layer's self-attention LayerNorm (deep copies)."""
        for encoder in self.encoders():
            encoder.self_attn_layer_norm_bc = copy.deepcopy(encoder.self_attn_layer_norm)
        for decoder in self.decoders():
            decoder.self_attn_layer_norm_bc = copy.deepcopy(decoder.self_attn_layer_norm)

    def restore_layer_norm_parameters(self):
        """Restore the LayerNorms saved by backup_layer_norm_parameters()."""
        for encoder in self.encoders():
            encoder.self_attn_layer_norm = copy.deepcopy(encoder.self_attn_layer_norm_bc)
        for decoder in self.decoders():
            decoder.self_attn_layer_norm = copy.deepcopy(decoder.self_attn_layer_norm_bc)
|
CMR-main
|
cmr/models/bart_with_adapater.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
import json
import random
class OfflineDebugger(ContinualFinetuning):
    """Offline debugger: fixes all stream errors at once to estimate an
    upper bound for the online continual-learning methods."""

    def __init__(self, logger):
        super().__init__(logger=logger)
        self.name = "offline_debug"

    def _check_debugger_args(self):
        """Require the offline-specific flag on top of the parent's args."""
        super()._check_debugger_args()
        required_atts = [
            # additional hyper parameters
            "offline_retrain_upstream",]
        assert all([hasattr(self.debugger_args, att) for att in required_atts])

    def _get_all_init_errors(self):
        """Collect every example marked ``init_status == "error"`` across the
        stream and format it for training.

        NOTE(review): the break condition reads len(self.data_eval_loaders),
        which this loop never appends to — presumably populated elsewhere;
        confirm the max_timecode cutoff actually triggers.
        """
        data_args = self.data_args
        all_init_errors = []
        for data_batch in tqdm(self.data_stream, desc="Creating the data loaders."):
            if data_args.max_timecode > 0 and len(self.data_eval_loaders) >= data_args.max_timecode:
                break
            all_init_errors += [item for item in data_batch if item["init_status"] == "error"]
        all_init_errors = self.data_formatter(all_init_errors)
        return all_init_errors

    def offline_debug(self):
        """Generate the bound when fixing the errors offline."""
        self.logger.info("Start Offline Debugging")
        self.timecode = -1
        # TODO: get the all_bug_examples
        init_errors = self._get_all_init_errors()
        # get the upstream examples
        with open(self.data_args.upstream_data_path) as f:
            # set() de-duplicates lines (and drops their original order).
            upstream_memory_examples = [json.loads(line)for line in set(f.read().splitlines())]
        upstream_memory_examples = self.upstream_data_formatter(upstream_memory_examples)
        if self.debugger_args.offline_retrain_upstream:
            merged_examples = init_errors + upstream_memory_examples
        else:
            merged_examples = init_errors
        # NOTE(review): the actual training/saving steps are disabled below —
        # presumably intentionally; this method currently only builds the data.
        # dl, _ = self.get_dataloader(self.data_args, merged_examples, mode="train")
        # self.fix_bugs(dl, quiet=False)
        # self._save_base_model(ckpt_name="offline")
|
CMR-main
|
cmr/debug_algs/offline_debug_bounds.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
from logging import disable
import numpy as np
import torch
from cmr.models.mybart import MyBart
from cmr.models import run_bart
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from cmr.task_manager.dataloader import GeneralDataset
from transformers import (AdamW, BartConfig, BartTokenizer,
get_linear_schedule_with_warmup)
from cmr.debug_algs.commons import OnlineDebuggingMethod
from tqdm import tqdm
import copy
class ContinualFinetuning(OnlineDebuggingMethod):
    """Simple continual fine-tuning ("simple_cl"): sequentially fine-tune the
    base BART model on each incoming bug batch, with optional L2 drift loss."""

    def __init__(self, logger):
        super().__init__(logger=logger)
        self.name = "simple_cl"

    def _check_debugger_args(self):
        """Assert that all required optimizer/training hyperparameters exist."""
        required_atts = ["weight_decay",
                         "learning_rate",
                         "adam_epsilon",
                         "warmup_steps",
                         "total_steps",
                         "num_epochs",
                         "gradient_accumulation_steps",
                         "max_grad_norm",
                         "diff_loss_weight"]
        assert all([hasattr(self.debugger_args, att) for att in required_atts])
        return

    def load_base_model(self, base_model_args, mode="online_debug"):
        """Load the MyBart checkpoint and move it to GPU(s) if available.

        Args:
            base_model_args: namespace with model_type and base_model_path.
            mode: unused here; kept for subclass compatibility.
        """
        self.base_model_args = base_model_args
        model_type, base_model_path = base_model_args.model_type, base_model_args.base_model_path
        self.logger.info(
            f"Loading checkpoint from {base_model_path} for {model_type} .....")
        self.base_model = MyBart.from_pretrained(model_type,
                                                 state_dict=convert_model_to_single_gpu(torch.load(base_model_path)))
        self.logger.info(
            f"Loading checkpoint from {base_model_path} for {model_type} ..... Done!")
        if self.use_cuda:
            self.base_model.to(torch.device("cuda"))
            self.logger.info("Moving to the GPUs.")
            if self.n_gpu > 1:
                self.base_model = torch.nn.DataParallel(self.base_model)

    def base_model_infer(self, eval_dataloader, verbose=False):
        """Run inference with the (unwrapped) base model; return predictions."""
        self.base_model.eval()
        model = self.base_model if self.n_gpu == 1 else self.base_model.module
        predictions = run_bart.inference(model, eval_dataloader, save_predictions=False, verbose=verbose,
                                         logger=self.logger, return_all=False, predictions_only=True, args=Namespace(quiet=True))
        return predictions

    def data_formatter(self, bug_batch):
        """Convert raw bug dicts into (input, truth, id) training triples."""
        # The continual fine-tuning method only uses the correct answers for fixing bugs.
        formatted_bug_batch = []
        for bug in bug_batch:
            # if "id" not in bug:
            #     _id = len(formatted_bug_batch)
            _id = bug["id"]
            _input = bug["input"]
            # _mistake = bug["mistake"]
            # TODO: only for now debugging.
            if "truth" in bug:
                _truth = bug["truth"]  # a list of answers
            else:
                _truth = bug["output"]  # a list of answers
            formatted_bug_batch.append((_input, _truth, _id))
        return formatted_bug_batch

    def get_dataloader(self, bug_data_args, formatted_bug_batch, mode="both", is_training="self"):
        """Build train and/or eval dataloaders for a formatted bug batch.

        NOTE(review): the default is_training="self" looks like a typo for
        True; the string is truthy, so behavior matches True — confirm before
        changing, since callers may pass it explicitly.
        """
        # mini bug-batch size.
        assert hasattr(bug_data_args, "train_batch_size")
        assert hasattr(bug_data_args, "predict_batch_size")
        train_bug_dataloader, eval_bug_dataloader = None, None
        if mode == "both" or mode == "train":
            # for error-fixing
            train_bug_dataloader = GeneralDataset(self.logger, bug_data_args, None,
                                                  data_type="train", is_training=True,
                                                  task_name=bug_data_args.task_name,
                                                  given_data=formatted_bug_batch)
            train_bug_dataloader.load_dataset(
                self.tokenizer, skip_cache=True, quiet=True)
            train_bug_dataloader.load_dataloader(is_training=is_training)
        if mode == "both" or mode == "eval":
            # for evaluation
            eval_bug_dataloader = GeneralDataset(self.logger, bug_data_args, None,
                                                 data_type="dev", is_training=False,
                                                 task_name=bug_data_args.task_name,
                                                 given_data=formatted_bug_batch)
            eval_bug_dataloader.load_dataset(
                self.tokenizer, skip_cache=True, quiet=True)
            eval_bug_dataloader.load_dataloader()
        return train_bug_dataloader, eval_bug_dataloader

    def reset_optimizer(self):
        """(Re)create AdamW + linear-warmup scheduler over the current model.

        Bias and LayerNorm weights are excluded from weight decay.
        """
        no_decay = ['bias', 'LayerNorm.weight']
        self.optimizer_grouped_parameters = [
            {'params': [p for n, p in self.base_model.named_parameters() if not any(
                nd in n for nd in no_decay)], 'weight_decay': self.debugger_args.weight_decay},
            {'params': [p for n, p in self.base_model.named_parameters() if any(
                nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        self.optimizer = AdamW(self.optimizer_grouped_parameters,
                               lr=self.debugger_args.learning_rate, eps=self.debugger_args.adam_epsilon)
        # TODO: double check the decision about warm up for fine-tuning
        self.scheduler = get_linear_schedule_with_warmup(self.optimizer,
                                                         num_warmup_steps=self.debugger_args.warmup_steps,
                                                         num_training_steps=self.debugger_args.total_steps)
        self.logger.info(f"optimizer & scheduler Setup ...... Done!")

    def debugger_setup(self, debugger_args):
        """Validate hyperparameters and initialize optimizer/scheduler."""
        self.debugger_args = debugger_args
        self._check_debugger_args()
        self.logger.info(f"Debugger Setup ......")
        self.logger.info(f"debugger_args: {debugger_args} ......")
        self.reset_optimizer()
        self.logger.info(f"Debugger Setup ...... Done!")
        return

    def fix_bugs(self, bug_loader, quiet=True):
        """Fine-tune the base model on one bug batch for num_epochs epochs.

        If diff_loss_weight > 0, adds an L2 penalty between the current
        weights and a snapshot taken at the start of this call.
        """
        # bug_dataloader is from self.bug_loaders
        self.base_model.train()
        train_losses = []
        global_step = 0
        if self.debugger_args.diff_loss_weight > 0:
            # Snapshot of the weights before this bug batch, for the L2 term.
            last_weights = copy.deepcopy(list(self.base_model.parameters()))
        for epoch_id in range(int(self.debugger_args.num_epochs)):
            for batch in tqdm(bug_loader.dataloader, desc=f"Bug-fixing Epoch {epoch_id}", disable=quiet):
                global_step += 1
                # here the batch is a mini batch of the current bug batch
                if self.use_cuda:
                    # print(type(batch[0]), batch[0])
                    batch = [b.to(torch.device("cuda")) for b in batch]
                pad_token_id = self.tokenizer.pad_token_id
                # Trim padding on both the encoder and decoder sides.
                batch[0], batch[1] = trim_batch(
                    batch[0], pad_token_id, batch[1])
                batch[2], batch[3] = trim_batch(
                    batch[2], pad_token_id, batch[3])
                loss = self.base_model(input_ids=batch[0], attention_mask=batch[1],
                                       decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
                                       is_training=True)
                if self.n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                # For L2 norm
                if self.debugger_args.diff_loss_weight > 0:
                    diff_loss = torch.Tensor([0]).to("cuda" if torch.cuda.is_available() else "cpu")
                    # Iterate over base_weights and curr_weights and accumulate the euclidean norm
                    # of their differences
                    curr_weights = list(self.base_model.parameters())
                    for base_param, curr_param in zip(last_weights, curr_weights):
                        diff_loss += (curr_param - base_param).pow(2).sum()
                    # self.logger.info(f"loss={loss}; diff_loss={diff_loss}; l2w={self.debugger_args.diff_loss_weight}")
                    loss = loss + self.debugger_args.diff_loss_weight * diff_loss
                train_losses.append(loss.detach().cpu())
                loss.backward()
                # NOTE(review): incremented per backward pass, not per
                # optimizer step — presumably intentional; confirm against
                # how model_update_steps is consumed.
                self.model_update_steps += 1
                if global_step % self.debugger_args.gradient_accumulation_steps == 0:
                    torch.nn.utils.clip_grad_norm_(
                        self.base_model.parameters(), self.debugger_args.max_grad_norm)
                    self.optimizer.step()  # We have accumulated enough gradients
                    self.scheduler.step()
                    self.base_model.zero_grad()
                    # last_weights = copy.deepcopy(list(self.base_model.parameters())) # update the last weights
        if self.debugger_args.diff_loss_weight > 0:
            del last_weights
        return
|
CMR-main
|
cmr/debug_algs/cl_simple_alg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import json
from altair.vegalite.v4.api import value
import numpy as np
import sys
import os
from numpy.lib.function_base import median
def get_prefix(filepath):
    """Derive a short run prefix from the third path segment of *filepath*,
    stripping the "_offline_eval" / "nq_dev_" markers and a 5-char date tag."""
    segment = filepath.split("/")[2]
    for marker in ("_offline_eval", "nq_dev_"):
        segment = segment.replace(marker, "")
    return segment[5:]
def eval_forgetting(online_debug_result, timecodes):
    """Measure knowledge retention (forgetting) over the debugging stream.

    Reads the EM score of the "eval_results_overall_forget" entry at each
    timecode and summarizes the trajectory. (The previous version also
    accumulated QA-F1 scores and a pass_forgetting_data list that were never
    used; the dead code is removed.)

    Args:
        online_debug_result: dict mapping str(timecode) -> result dict
            containing ["eval_results_overall_forget"]["metric_results"]["EM"].
        timecodes: non-empty iterable of timecodes, in stream order.

    Returns:
        (worse, mean, final): worst, average, and last EM on the pass examples.
    """
    em_on_passes = []
    for timecode in timecodes:
        item = online_debug_result[str(timecode)]
        r = item["eval_results_overall_forget"]["metric_results"]
        em_on_passes.append(r["EM"])
    worse = np.min(em_on_passes)
    mean = np.mean(em_on_passes)
    final = em_on_passes[-1]
    # print(f"Forgetting measure (EM): worse={worse}; mean={mean}; final={final}")
    return worse, mean, final
def eval_error_fixing(online_debug_result, timecodes):
    """Measure the error-fixing rate (EFR) over the debugging stream.

    (The previous version carried dead locals — inter_prefix_efr,
    inter_respon_efr, bsz, odr, mean_ip_efr, mean_ir_efr — belonging to a
    disabled per-batch computation; they are removed here.)

    Args:
        online_debug_result: dict mapping str(timecode) -> result dict
            containing ["eval_results_overall_bug"]["metric_results"]["EM"].
        timecodes: non-empty iterable of timecodes, in stream order.

    Returns:
        (final_state_bug_fixing_rate, best_efr, mean_efr): EM on the bug set
        at the last timecode, plus the best and mean over all timecodes.
    """
    bug_fixing_rates = [
        online_debug_result[str(t)]["eval_results_overall_bug"]["metric_results"]["EM"]
        for t in timecodes]
    final_state_bug_fixing_rate = bug_fixing_rates[-1]
    # TODO: add these back later (instant per-batch prefix/response EFR):
    # for timecode, ((before, after), em_fixed, f1_fixed, em_prefixed, f1_prefixed) in \
    #     enumerate(zip(odr["res_on_bugs"], odr["em_fixed_bugs"], odr["f1_fixed_bugs"], odr["em_prefixed_bugs"], odr["f1_prefixed_bugs"])):
    #     inter_prefix_efr.append(len(em_prefixed)/bsz)
    #     inter_respon_efr.append(len(em_fixed)/(bsz-len(em_prefixed)))
    best_efr = np.max(bug_fixing_rates)
    mean_efr = np.mean(bug_fixing_rates)
    return final_state_bug_fixing_rate, best_efr, mean_efr
def print_eval(path="bug_data/output/nq_dev_0625_1e-5_e3_result.json"):
    """Load an all-time result file and print a one-line CSV summary of the
    knowledge-retention (KR) and error-fixing-rate (EFR) statistics.

    (The previous version parsed lr/num_epoch out of the filename but never
    used them; that fragile dead code is removed.)

    Args:
        path: path to an alltime_result.json whose keys are str timecodes.
    """
    prefix = get_prefix(path)
    assert os.path.exists(path)
    # Load the json data: {str(timecode): per-timecode eval results}.
    all_results = json.load(open(path))
    timecodes = [int(t) for t in list(all_results.keys())]
    timecodes = sorted(timecodes, reverse=False)
    worse_kr, mean_kr, final_kr = eval_forgetting(all_results, timecodes)
    final_efr, best_efr, mean_efr = eval_error_fixing(all_results, timecodes)
    # Harmonic mean of retention and error-fixing.
    # NOTE(review): raises ZeroDivisionError when both terms are 0 —
    # presumably acceptable for this analysis script; confirm.
    final_f1 = 2*(final_kr*final_efr)/(final_kr+final_efr)
    mean_f1 = 2*(mean_kr*mean_efr)/(mean_kr+mean_efr)
    print(f"{prefix}, {worse_kr}, {mean_kr}, {final_kr}, {best_efr}, {mean_efr}, {final_efr}, {mean_f1} , {final_f1}")
def aggregate_offline_results(path="bug_data/output/nq_dev_0701_v2_offline_eval/"):
    """Merge the per-thread offline-eval shards (thread_*.json) found under
    *path* into a single {path}/alltime_result.json file.

    Later shards overwrite earlier ones on duplicate keys (dict.update).
    """
    import glob
    merged = {}
    for shard_path in sorted(glob.glob(f"{path}/thread_*.json")):
        with open(shard_path) as fin:
            merged.update(json.load(fin))
    with open(f"{path}/alltime_result.json", "w") as fout:
        json.dump(merged, fout)
if __name__ == '__main__':
    # Step 1: merge per-thread shards into alltime_result.json for each run.
    # (Earlier runs are kept commented out as a record of what was aggregated.)
    # aggregate_offline_results("bug_data/output/nq_dev_0706_3e-5_e5_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0706_3e-5_e3_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0706_1e-5_e3_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0706_1e-5_e5_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_l0.5_g1_3e-5_e5_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_l5_g1_3e-5_e5_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_l50_g1_3e-5_e5_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_l500_g1_3e-5_e5_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_l5000_g1_3e-5_e5_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_l50000_g1_3e-5_e5_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_withup_l500_g1_3e-5_e5_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0708_ewc_withup_l5000_g1_3e-5_e5_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0709_simplereplay_rsz30_3e-5_e5_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0709_simplereplay_rsz10_3e-5_e5_offline_eval")
    # aggregate_offline_results("bug_data/output/nq_dev_0709_simplereplay_rsz100_3e-5_e5_offline_eval")
    aggregate_offline_results("bug_data/output/nq_dev_0716_mbpapp_rsz32_rf30_3e-5_e5_offline_eval")
    aggregate_offline_results("bug_data/output/nq_dev_0716v1_mbpapp_rsz32_rf30_3e-5_e5_woadapt_offline_eval")
    aggregate_offline_results("bug_data/output/nq_dev_0716_mbpa_3e-5_e5_offline_eval")
    # Step 2: print one CSV row per run; this first print is the header.
    print("{prefix}, {worse_kr}, {mean_kr}, {final_kr}, {best_efr}, {mean_efr}, {final_efr}, {mean_f1}, {final_f1}")
    print_eval("bug_data/output/nq_dev_0706_1e-5_e3_offline_eval/alltime_result.json")
    print_eval("bug_data/output/nq_dev_0706_3e-5_e3_offline_eval/alltime_result.json")
    print_eval("bug_data/output/nq_dev_0706_1e-5_e5_offline_eval/alltime_result.json")
    print_eval("bug_data/output/nq_dev_0706_3e-5_e5_offline_eval/alltime_result.json")
    print("-"*50)
    print_eval("bug_data/output/nq_dev_0708_ewc_l0.5_g1_3e-5_e5_offline_eval/alltime_result.json")
    print_eval("bug_data/output/nq_dev_0708_ewc_l5_g1_3e-5_e5_offline_eval/alltime_result.json")
    print_eval("bug_data/output/nq_dev_0708_ewc_l50_g1_3e-5_e5_offline_eval/alltime_result.json")
    print_eval("bug_data/output/nq_dev_0708_ewc_l500_g1_3e-5_e5_offline_eval/alltime_result.json")  # the best
    print_eval("bug_data/output/nq_dev_0708_ewc_l5000_g1_3e-5_e5_offline_eval/alltime_result.json")
    print_eval("bug_data/output/nq_dev_0708_ewc_l50000_g1_3e-5_e5_offline_eval/alltime_result.json")
    print_eval("bug_data/output/nq_dev_0708_ewc_withup_l500_g1_3e-5_e5_offline_eval/alltime_result.json")
    print_eval("bug_data/output/nq_dev_0708_ewc_withup_l5000_g1_3e-5_e5_offline_eval/alltime_result.json")
    print("-"*50)
    print_eval("bug_data/output/nq_dev_0709_simplereplay_rsz10_3e-5_e5_offline_eval/alltime_result.json")
    print_eval("bug_data/output/nq_dev_0709_simplereplay_rsz30_3e-5_e5_offline_eval/alltime_result.json")
    print_eval("bug_data/output/nq_dev_0709_simplereplay_rsz100_3e-5_e5_offline_eval/alltime_result.json")
    print("-"*50)
    print_eval("bug_data/output/nq_dev_0716_mbpapp_rsz32_rf30_3e-5_e5_offline_eval/alltime_result.json")
    print_eval("bug_data/output/nq_dev_0716v1_mbpapp_rsz32_rf30_3e-5_e5_woadapt_offline_eval/alltime_result.json")
    print_eval("bug_data/output/nq_dev_0716_mbpa_3e-5_e5_offline_eval/alltime_result.json")
|
CMR-main
|
cmr/debug_algs/evaluation.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
import argparse
from torch import detach
from cmr.models.utils import set_seeds
from cmr.debug_algs.cl_none import NoneCL, OfflineCL
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from cmr.debug_algs.cl_online_ewc_alg import OnlineEWC
from cmr.debug_algs.offline_debug_bounds import OfflineDebugger
from cmr.debug_algs.cl_mbcl_alg import MemoryBasedCL
from cmr.debug_algs.index_based.cl_indexed_alg import IndexBasedCL
from cmr.debug_algs.cl_hypernet_alg import HyperCL
from cmr.debug_algs.distant_supervision import data_collection
import logging
import os
import json
from tqdm import tqdm
import numpy as np
import wandb
class TqdmHandler(logging.Handler):
    """Logging handler that routes records through tqdm.write so log lines
    do not corrupt an active tqdm progress bar."""

    def emit(self, record):
        try:
            tqdm.write(self.format(record))  # , file=sys.stderr)
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            # Let interpreter-shutdown signals propagate.
            raise
        except:
            # Standard logging convention: report, never raise, on emit errors.
            self.handleError(record)
def setup_args(args):
    """Build the debugger, data, base-model, and method namespaces from CLI args.

    Returns:
        (debugging_alg, data_args, base_model_args, debugger_args, logger)

    NOTE(review): if cl_method_name is not in the recognized list,
    debugging_alg / debugger_args are never bound and the return raises
    UnboundLocalError — presumably argparse choices guard this upstream; confirm.
    """
    set_seeds(args.seed)
    prefix = args.prefix
    log_filename = f"logs/{prefix}_online_debug.log"
    # Log to file, stderr, and through tqdm (so progress bars stay intact).
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO,
                        handlers=[logging.FileHandler(log_filename),
                                  logging.StreamHandler(), TqdmHandler()])
    logger = logging.getLogger(__name__)
    logger.info(args)
    # --- Select the CL method implementation. ---
    if args.cl_method_name == "none_cl":
        debugging_alg = NoneCL(logger=logger)
    elif args.cl_method_name == "offline_cl":
        debugging_alg = OfflineCL(logger=logger)
    elif args.cl_method_name == "simple_cl":
        debugging_alg = ContinualFinetuning(logger=logger)
    elif args.cl_method_name == "online_ewc":
        debugging_alg = OnlineEWC(logger=logger)
    elif args.cl_method_name == "offline_debug":
        debugging_alg = OfflineDebugger(logger=logger)
    elif args.cl_method_name in ["er", "mir"]:  # replay only
        assert args.replay_frequency > 0
        assert args.replay_size > 0
        if args.cl_method_name == "mir":
            args.use_mir = True
            assert args.replay_candidate_size >= args.replay_size
            assert args.num_adapt_epochs >= 1  # this is for the virtual update
        else:
            assert args.num_adapt_epochs <= 0
        debugging_alg = MemoryBasedCL(logger=logger)
        debugging_alg.name = args.cl_method_name
    elif args.cl_method_name == "mbpa":
        # local adaptation only, no replay
        assert args.num_adapt_epochs > 0
        assert args.replay_frequency <= 0
        assert args.replay_size <= 0
        debugging_alg = MemoryBasedCL(logger=logger)
        debugging_alg.name = args.cl_method_name
    elif args.cl_method_name == "mbpa++":
        # local adaptation + replay
        assert args.num_adapt_epochs > 0
        assert args.replay_frequency > 0
        assert args.replay_size > 0
        debugging_alg = MemoryBasedCL(logger=logger)
        debugging_alg.name = args.cl_method_name
    elif args.cl_method_name == "index_cl":
        assert args.replay_frequency > 0
        assert args.replay_size > 0
        assert args.num_adapt_epochs <= 0
        debugging_alg = IndexBasedCL(logger=logger)
        debugging_alg.name = args.cl_method_name
    elif args.cl_method_name == "hyper_cl":
        debugging_alg = HyperCL(logger=logger)
    elif args.cl_method_name == "simple_ds_mine":
        debugging_alg = data_collection.MiningSupervision(logger=logger)
    # --- Data configuration shared by all methods. ---
    data_args = Namespace(
        submission_stream_data=args.submission_stream_data,
        stream_id=args.stream_id,
        upstream_eval_data=args.upstream_eval_data,
        heldout_submission_data=args.heldout_submission_data,
        upstream_data_path=args.upstream_data_path,
        # sampled_upstream_json_path=args.sampled_upstream_json_path,
        # pass_sample_size=args.pass_sample_size,
        do_lowercase=args.do_lowercase,
        append_another_bos=args.append_another_bos,
        max_input_length=args.max_input_length,
        max_output_length=args.max_output_length,
        task_name=args.task_name,
        result_file=args.result_file,
        train_batch_size=args.train_batch_size,
        predict_batch_size=args.predict_batch_size,
        num_beams=args.num_beams,
        max_timecode=args.max_timecode,
        accumulate_eval_freq=-1,
        # use_sampled_upstream=args.use_sampled_upstream,
    )
    base_model_args = Namespace(
        model_type=args.base_model_type,
        base_model_path=args.base_model_path
    )
    # --- Method hyperparameters: shared core plus per-method extras. ---
    if args.cl_method_name in ["none_cl", "offline_cl", "simple_cl", "online_ewc", "er", "mir", "mbpa", "mbpa++", "index_cl", "hyper_cl", "simple_ds_mine"]:
        debugger_args = Namespace(
            weight_decay=args.weight_decay,
            learning_rate=args.learning_rate,
            adam_epsilon=args.adam_epsilon,
            warmup_steps=0,
            total_steps=10000,
            num_epochs=args.num_train_epochs,
            gradient_accumulation_steps=args.gradient_accumulation_steps,
            max_grad_norm=args.max_grad_norm,
            diff_loss_weight=args.diff_loss_weight,
            save_ckpt_freq=args.save_ckpt_freq,
            ckpt_dir=args.ckpt_dir,
            skip_instant_eval=args.skip_instant_eval,
            kr_eval_freq=args.kr_eval_freq,
            kr_eval_mode=args.kr_eval_mode,
            okr_sample_size=args.okr_sample_size,
            okr_sample_seed=args.okr_sample_seed,
            kg_eval_freq=args.kg_eval_freq,
            kg_eval_mode=args.kg_eval_mode,
        )
        if args.cl_method_name == "online_ewc":
            setattr(debugger_args, "ewc_lambda", args.ewc_lambda)
            setattr(debugger_args, "ewc_gamma", args.ewc_gamma)
        elif args.cl_method_name in ["er", "mbpa", "mbpa++", "mir", "index_cl"]:
            # Replay / memory-based extras.
            setattr(debugger_args, "use_replay_mix", args.use_replay_mix)
            setattr(debugger_args, "replay_size", args.replay_size)
            setattr(debugger_args, "replay_candidate_size", args.replay_candidate_size)
            setattr(debugger_args, "replay_frequency", args.replay_frequency)
            setattr(debugger_args, "memory_path", args.memory_path)
            setattr(debugger_args, "init_memory_cache_path", args.init_memory_cache_path)
            setattr(debugger_args, "memory_key_encoder", args.memory_key_encoder)
            setattr(debugger_args, "memory_store_rate", args.memory_store_rate)
            setattr(debugger_args, "upstream_sample_ratio", args.upstream_sample_ratio)
            setattr(debugger_args, "num_adapt_epochs", args.num_adapt_epochs)
            setattr(debugger_args, "inference_query_size", args.inference_query_size)
            setattr(debugger_args, "local_adapt_lr", args.local_adapt_lr)
            if args.cl_method_name == "mir" or args.use_mir:
                setattr(debugger_args, "mir_abalation_args", args.mir_abalation_args)
            if args.cl_method_name == "index_cl":
                setattr(debugger_args, "use_mir", args.use_mir)
                setattr(debugger_args, "index_rank_method", args.index_rank_method)
                setattr(debugger_args, "indexing_method", args.indexing_method)
                setattr(debugger_args, "indexing_args_path", args.indexing_args_path)
        elif args.cl_method_name in ["hyper_cl"]:
            setattr(debugger_args, "adapter_dim", args.adapter_dim)
            setattr(debugger_args, "example_encoder_name", args.example_encoder_name)
            setattr(debugger_args, "task_emb_dim", args.task_emb_dim)
    return debugging_alg, data_args, base_model_args, debugger_args, logger
def run(args):
    """Wire everything together and execute the online debugging loop.

    Loads data and the base model, sets up the chosen CL method, runs
    online debugging, and writes the result file.
    """
    (debugging_alg, data_args, base_model_args,
     debugger_args, logger) = setup_args(args)
    # Online debugging mode (also used for computing offline bounds).
    debugging_alg.load_data(data_args)
    debugging_alg.load_base_model(base_model_args)
    debugging_alg.debugger_setup(debugger_args)
    debugging_alg.online_debug()
    debugging_alg.save_result_file()
    logger.info(f"Finished. Results saved to {args.result_file}")
    return
def get_cli_parser():
    """Build the argparse parser for the lifelong fine-tuning runner.

    Groups: base model, data paths, base-model decoding, evaluation schedule,
    per-method hyperparameters (simple CL, EWC, replay/memory, MIR, index-CL,
    HyperCL, offline), and I/O / parallel-eval options.
    """
    parser = argparse.ArgumentParser()
    # base_model_args
    parser.add_argument("--base_model_type",
                        default="facebook/bart-base", required=False)
    parser.add_argument(
        "--base_model_path",
        default="out/mrqa_squad_bart-base_1029_upstream_model/best-model.pt", type=str)
    # data_args
    parser.add_argument("--submission_stream_data",
                        default="/path/to/submission_stream")
    # this will be used for evaluating forgetting
    parser.add_argument("--upstream_eval_data",
                        default="experiments/eval_data/qa/upstream_eval.v1.jsonl")
    parser.add_argument("--heldout_submission_data",
                        default="experiments/eval_data/qa/heldout_eval.v1.json")
    parser.add_argument("--upstream_data_path",
                        default="data/mrqa_squad/mrqa_squad_train.jsonl")
    # default="bug_data/mrqa_naturalquestions.sampled_upstream.jsonl")
    parser.add_argument("--task_name", default="mrqa")
    # base model args.
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--predict_batch_size', type=int, default=16)
    parser.add_argument('--num_beams', type=int, default=3)
    parser.add_argument("--do_lowercase", action='store_true', default=False)
    parser.add_argument("--freeze_embeds", action='store_true', default=False)
    parser.add_argument('--max_input_length', type=int, default=888)
    parser.add_argument('--max_output_length', type=int, default=50)
    parser.add_argument("--append_another_bos", type=int,
                        default=1)  # should be true (1)
    # evaluation related
    parser.add_argument('--skip_instant_eval', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
    parser.add_argument('--use_wandb', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
    parser.add_argument('--kr_eval_freq', type=int, default=5)
    parser.add_argument('--kr_eval_mode', default="loss")  # loss or metric
    parser.add_argument('--okr_sample_size', type=int, default=512)
    parser.add_argument('--okr_sample_seed', type=int, default=1337)
    parser.add_argument('--kg_eval_freq', type=int, default=5)
    parser.add_argument('--kg_eval_mode', default="loss")  # loss or metric
    # CMR-benchmark related
    # debugger_args
    parser.add_argument('--cl_method_name', type=str, default="none_cl",
                        help="the method name of the continual learning method")
    ### The HPs for Simple Continual Fine-tuning Method. ###
    parser.add_argument("--learning_rate", default=1e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.01, type=float,
                        help="Weight deay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=0.1, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--diff_loss_weight", default=0, type=float,
                        help="For L2 reg")
    parser.add_argument("--gradient_accumulation_steps", default=1, type=int,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    ### The HPs for Online EWC Method. ###
    parser.add_argument("--ewc_lambda", default=0.5, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--ewc_gamma", default=1, type=float,
                        help="Max gradient norm.")
    # parser.add_argument("--use_sampled_upstream", action='store_true', default=False)
    ### The HPs for replay-based methods and memory-based.
    parser.add_argument('--replay_size', type=int, default=8)
    parser.add_argument('--replay_candidate_size', type=int, default=8)
    parser.add_argument('--replay_frequency', type=int, default=1)  # 1 means always replay for every steps, set to 10 means sample after 10 model updates.
    parser.add_argument('--memory_key_encoder', type=str, default="facebook/bart-base")
    parser.add_argument('--memory_path', type=str, default="")
    parser.add_argument('--init_memory_cache_path', type=str, default="bug_data/memory_key_cache.pkl")
    parser.add_argument('--upstream_sample_ratio', type=float, default=-1)  #
    parser.add_argument('--memory_store_rate', type=float, default=1.0)  # 1= always store all examples to the memory.
    parser.add_argument('--num_adapt_epochs', type=int, default=1)  #
    parser.add_argument('--inference_query_size', type=int, default=1)  #
    parser.add_argument("--use_replay_mix", action='store_true', default=False)  # mix the replayed examples with the current error examples.
    parser.add_argument('--local_adapt_lr', type=float, default=1e-5)  #
    # MIR ablation options
    parser.add_argument('--mir_abalation_args', type=str, default="none")
    # Indexbased CL abalation options
    parser.add_argument('--use_mir', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
    parser.add_argument('--index_rank_method', type=str, default="most_similar")
    parser.add_argument('--indexing_method', type=str, default="bart_index")  # bart_index, biencoder
    parser.add_argument('--indexing_args_path', type=str, default="exp_results/supervision_data/1012_dm_simple.train_args.json")  # bart_index, biencoder
    ### The HPs for HyperCL
    parser.add_argument('--adapter_dim', type=int, default=32)  # adapter bottleneck width
    parser.add_argument('--example_encoder_name', type=str, default="roberta-base")
    parser.add_argument('--task_emb_dim', type=int, default=768)
    ### The HPs for offline
    parser.add_argument('--offline_retrain_upstream', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
    # To save all ckpts.
    # I/O parameters
    parser.add_argument('--prefix', type=str, default="",
                        help="Prefix for saving predictions")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--stream_id', type=int, default=0,
                        help="multiple_streams")
    parser.add_argument(
        "--result_file", default="bug_data/results.json", type=str)
    parser.add_argument("--ckpt_dir", type=str, default="experiments/ckpt_dirs/qa/nonecl",
                        help="path to all ckpts for saving")
    parser.add_argument("--save_ckpt_freq", type=int, default=5,  # 0 means no save for the intermidiate . but we always save the final model ckpt.
                        help="set to 1 if we want all ckpts and eval offline")
    # Offline Evaluation Mode in Parallel
    parser.add_argument("--num_threads_eval", type=int, default=0,
                        help="0 means nothing; >0 means the number of gpu threads")
    parser.add_argument("--current_thread_id", type=int,
                        help="0 to num_threads_eval-1")
    parser.add_argument("--max_timecode", default=-1, type=int,
                        help="the maximum timecode to eval")
    parser.add_argument("--path_to_thread_result", type=str,
                        help="the path to save the thread results")
    return parser
if __name__ == '__main__':
    # Parse CLI args and optionally enable wandb experiment tracking.
    args = get_cli_parser().parse_args()
    if args.use_wandb:
        wandb_mode = "online"
    else:
        wandb_mode = "disabled"
    # mode="disabled" makes all wandb calls no-ops when tracking is off.
    wandb_run = wandb.init(reinit=True, project="error-nlp", mode=wandb_mode, settings=wandb.Settings(start_method="fork"), name=args.prefix)
    run_name = wandb.run.name  # NOTE(review): appears unused afterwards.
    wandb.config.update(args)
    run(args)
|
CMR-main
|
cmr/debug_algs/run_lifelong_finetune.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from cmr.debug_algs.cl_utils import get_top_interfered_examples, local_adaptation, KeyValueMemoryModule
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
import random
import numpy as np
import torch
import transformers
from cmr.debug_algs.index_based.index_manager import RandomMemoryManger
from cmr.task_manager.eval_metrics import evaluate_func
import copy
import pickle
import os
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from argparse import Namespace
import more_itertools
import json
class MemoryBasedCL(ContinualFinetuning):
    """Continual-learning debugger with an episodic memory for sparse replay.

    Covers the replay-based methods: ER (random replay), MIR (maximally
    interfered retrieval), and -- partially -- MbPA/MbPA++ (replay plus local
    adaptation at inference time). ``self.name`` selects the behavior.
    """

    def __init__(self, logger):
        super().__init__(logger=logger)
        self.name = "tbd"  # set by the runner; one of: er / mir / mbpa / mbpa++
        self.upstream_memory_examples = []  # formatted upstream data used to seed the memory

    def load_data(self, data_args, given_data_stream=None):
        """Load the submission stream (parent class) plus the upstream memory examples."""
        super().load_data(data_args, given_data_stream=given_data_stream)
        with open(data_args.upstream_data_path) as f:
            # set() de-duplicates repeated jsonl lines. NOTE(review): set order is
            # non-deterministic across runs, so example order varies -- confirm the
            # downstream random sampling makes this harmless.
            upstream_memory_examples = [json.loads(line)for line in set(f.read().splitlines())]
        self.upstream_memory_examples = self.upstream_data_formatter(upstream_memory_examples)

    def _check_debugger_args(self):
        """Assert that all replay/memory hyper-parameters are present."""
        super()._check_debugger_args()
        required_atts = [
            "replay_size",
            "replay_candidate_size",
            "replay_frequency",
            "memory_key_encoder",  # 'bert-base-uncased' by default
            "memory_store_rate",  # 0, 0.1, 1 etc.
            "upstream_sample_ratio",
            "memory_path",  # to save/load the memory module from disk
            "init_memory_cache_path",
            "num_adapt_epochs",
            "inference_query_size",
            "local_adapt_lr",
            "use_replay_mix",
        ]
        assert all([hasattr(self.debugger_args, att) for att in required_atts])

    def debugger_setup(self, debugger_args):
        """Set up the optimizer/scheduler (parent) and initialize the memory modules."""
        super().debugger_setup(debugger_args)
        # Initializing the Key-Value memory module for MBPA++
        if self.name in ["er", "mir"]:
            # NOTE: "memroy" is a pre-existing misspelling of the attribute name,
            # kept as-is because result files / other modules reference it.
            self.upstream_memroy_module = RandomMemoryManger(self.logger)
            self.memroy_module = RandomMemoryManger(self.logger)
            self.logger.info("Prepare the sampled upstream data as the initial memory for the ER and MIR;")
            # upstream possible
            self.upstream_memroy_module.set_up_initial_memory(formatted_examples=self.upstream_memory_examples)
            if self.debugger_args.upstream_sample_ratio < 0:
                # A negative ratio means a single mixed buffer: upstream and error
                # examples share one memory module.
                self.memroy_module = self.upstream_memroy_module
            self.logger.info(f"Initial memroy_module size: {self.memroy_module.get_memory_size()}")
            self.logger.info(f"Initial upstream_memroy_module size: {self.upstream_memroy_module.get_memory_size()}")
        elif self.name in ["mbpa", "mbpa++"]:
            # TODO: prepare the Memory module for it
            pass
        return

    def online_debug(self):
        """Main loop: per incoming batch -- evaluate, replay (sparsely), fix errors, store to memory."""
        self.logger.info("Start Online Debugging with Dynamic Error Mode")
        self.logger.info(f"Number of Batches of Data: {self.num_data_batches}")
        self.logger.info(f"Data Batch Size: {self.data_batch_size};")
        self.timecode = 0
        if self.debugger_args.save_ckpt_freq:
            # save the initial model as the 0-th model.
            self._save_base_model()
        last_steps = 0
        for data_eval_loader in tqdm(self.data_eval_loaders, desc="Online Debugging (with Memory Replay)"):
            result_dict = {"timecode": self.timecode}  # start with 0
            # Knowledge-retention / generalization metrics BEFORE this step's update.
            self.eval_knowledge_retention(result_dict)
            self.eval_knowledge_generalization(result_dict)
            ############### CORE ###############
            # self._replay_based_eval(result_dict)
            formatted_bug_examples = self._get_dynamic_errors(
                data_eval_loader, result_dict, return_raw_bug_examples=True)
            _, bug_eval_loader = self.get_dataloader(self.data_args, formatted_bug_batch=formatted_bug_examples, mode="eval")
            examples_to_train = formatted_bug_examples[:]
            # Sparse replay: only every `replay_frequency` steps, never at timecode 0.
            if self.timecode % self.debugger_args.replay_frequency == 0 \
                    and self.debugger_args.replay_frequency > 0 and self.debugger_args.replay_size > 0 \
                    and self.timecode > 0:
                # sparse experience replay
                self.logger.info("Triggering Sampling from Memory and starting to replay.")
                self.logger.info(f"Current memroy_module size: {self.memroy_module.get_memory_size()}.")
                self.logger.info(f"Current upstream_memroy_module size: {self.upstream_memroy_module.get_memory_size()}.")
                if self.name == "mir":
                    def mir_retrieve(mm, sample_size):
                        """Sample a candidate pool from `mm`, keep the `sample_size` most interfered.

                        NOTE(review): `bug_train_loader` here is the loader built at the
                        END of the PREVIOUS loop iteration (replay requires timecode > 0,
                        so it is always bound) -- i.e. MIR's virtual update uses the
                        previous batch as the query. Confirm this is intended.
                        """
                        assert self.debugger_args.replay_candidate_size >= self.debugger_args.replay_size
                        retrieved_examples_candidates = mm.retrieve_from_memory(
                            sample_size=min(self.debugger_args.replay_candidate_size, mm.get_memory_size()))
                        if "mir_buffer_ids" not in result_dict:
                            result_dict["mir_buffer_ids"] = []
                        result_dict["mir_buffer_ids"] += [_id for (_input, _truth, _id) in retrieved_examples_candidates]
                        retrieved_examples = get_top_interfered_examples(self,
                            K=sample_size, candidate_examples=retrieved_examples_candidates, query_data_loader=bug_train_loader)
                        return retrieved_examples
                    # self.logger.info(f"retrieved_examples (mir)={retrieved_examples}")
                    if self.debugger_args.upstream_sample_ratio > 0:
                        # Split the replay budget between upstream memory and error memory.
                        upstream_sample_budget = int(self.debugger_args.upstream_sample_ratio * self.debugger_args.replay_size)
                        self.logger.info(f"Memory from upstream_memroy_module = {upstream_sample_budget}; ")
                        self.logger.info(f"Memory from memroy_module = {self.debugger_args.replay_size-upstream_sample_budget}; ")
                        retrieved_examples = []
                        if upstream_sample_budget > 0:
                            retrieved_examples += mir_retrieve(mm=self.upstream_memroy_module,
                                                               sample_size=upstream_sample_budget)
                        retrieved_examples += mir_retrieve(mm=self.memroy_module,
                                                           sample_size=self.debugger_args.replay_size-upstream_sample_budget)
                    else:
                        retrieved_examples = mir_retrieve(mm=self.memroy_module,
                                                          sample_size=self.debugger_args.replay_size)
                else:
                    # ER path: uniform random retrieval, same budget-splitting logic.
                    if self.debugger_args.upstream_sample_ratio > 0:
                        upstream_sample_budget = int(self.debugger_args.upstream_sample_ratio * self.debugger_args.replay_size)
                        self.logger.info(f"Memory from upstream_memroy_module = {upstream_sample_budget}; ")
                        self.logger.info(f"Memory from memroy_module = {self.debugger_args.replay_size-upstream_sample_budget}; ")
                        retrieved_examples = []
                        if upstream_sample_budget > 0:
                            retrieved_examples += self.upstream_memroy_module.retrieve_from_memory(
                                sample_size=upstream_sample_budget)
                        retrieved_examples += self.memroy_module.retrieve_from_memory(
                            sample_size=self.debugger_args.replay_size-upstream_sample_budget)
                    else:
                        retrieved_examples = self.memroy_module.retrieve_from_memory(
                            sample_size=self.debugger_args.replay_size)
                self.base_model.train()
                result_dict["retrieved_ids"] = [_id for (_input, _truth, _id) in retrieved_examples]
                if self.debugger_args.use_replay_mix:
                    # Mix mode: replayed examples are trained together with the current errors.
                    examples_to_train += retrieved_examples
                    self.logger.info(f"Mixed the retrieved examples (len={len(retrieved_examples)}) to the current batch for training.")
                else:
                    # Separate mode: a dedicated training pass on the replayed examples.
                    self.logger.info(f"Replay-Training Start! Using the retrieved examples (len={len(retrieved_examples)}) ")
                    replay_data_loader, _ = self.get_dataloader(
                        self.data_args, retrieved_examples, mode="train")
                    self.fix_bugs(replay_data_loader, quiet=False)  # sparse replay
                    self.logger.info("Replay-Training done.")
            last_steps = self.model_update_steps
            # Fix the bugs by mini-batch based "training"
            self.logger.info(f"Start error-fixing (len(examples_to_train)={len(examples_to_train)}) .... Timecode: {self.timecode}")
            bug_train_loader, _ = self.get_dataloader(
                self.data_args, examples_to_train, mode="train")
            self.fix_bugs(bug_train_loader)  # for debugging
            self.logger.info("Start error-fixing .... Done!")
            # Store to memory
            _max = 1000000
            # flag_store_examples = bool(random.randrange(0, _max)/_max >=
            #                            1 - self.debugger_args.memory_store_rate)
            flag_store_examples = True  # NOTE(review): rate-based sub-sampling is disabled; every error batch is stored.
            if flag_store_examples:
                self.logger.info(f"Saving the current error examples (len={len(formatted_bug_examples)}) to the memory.")
                self.logger.info(f"Current memory size: {self.memroy_module.get_memory_size()}.")
                self.memroy_module.store_examples(formatted_bug_examples)
                self.logger.info(".................. Done.")
            ############### CORE ###############
            self.evaluate_error_fixing(result_dict, bug_eval_loader)
            self._update_result_dict(result_dict)
            if self.debugger_args.save_ckpt_freq > 0 and self.timecode % self.debugger_args.save_ckpt_freq == 0:
                self._save_base_model()
                self.save_result_file()
            self.logger.info("-"*50)
            self.timecode += 1
        #### Final evaluation ####
        self.final_evaluation()
        #### Save the final model ####
        self._save_base_model()
        # Save to path
        self.memroy_module.save_memory_to_path(self.debugger_args.memory_path)

    def evaluate(self, eval_dataloader=None, verbose=False):
        """Evaluates the performance"""
        if self.name not in ["mbpa", "mbpa++"]:
            # ER (no local adpatation).
            # This is for the equvilent version of the replay as the baseline (MbPA++ w/o local adaptation when inference or just simple replay.)
            return super().evaluate(eval_dataloader, verbose)
        # MbPA/MbPA++ path: retrieve neighbours from memory and locally adapt per batch.
        if not eval_dataloader:
            eval_dataloader = self.submission_eval_loaders[self.timecode]
        # TODO: reset the bsz for the local adaptation.
        # prepare adapt_dataloaders
        adapt_dataloaders = self.get_adapt_dataloaders(eval_dataloader, verbose=True)
        predictions = self.base_model_infer_with_adaptation(
            eval_dataloader, adapt_dataloaders, verbose)
        assert len(predictions) == len(eval_dataloader)
        predictions = [p.strip() for p in predictions]
        results, return_all = evaluate_func(
            predictions, eval_dataloader.data, self.metric, return_all=True)
        return predictions, results, return_all

    ### The Adapatation Related Functions ###

    def get_adapt_dataloaders(self, eval_dataloader=None, verbose=False):
        """Get the adapt_dataloader (one per eval batch; None when memory is empty)."""
        adapt_dataloaders = []
        num_batches = len(eval_dataloader.dataloader)
        example_batches = np.array_split(eval_dataloader.data, num_batches)
        # Only allow retrieving from the past memory. (due to offline evaluation)
        past_memory_keys = []
        for key, values in self.memroy_module.memory.items():
            # values[3] is the timecode recorded when the example was stored.
            if values[3]-1 <= self.timecode:
                past_memory_keys.append(key)
        if not past_memory_keys:
            # Nothing stored yet: signal "no adaptation" for every batch.
            adapt_dataloaders = [None for _ in range(len(example_batches))]
            return adapt_dataloaders
        # Keys were stored as raw bytes; decode back to a (num_keys, dim) float32 matrix.
        past_memory_keys = np.frombuffer(np.asarray(
            past_memory_keys), dtype=np.float32).reshape(len(past_memory_keys), -1)
        for example_batch in tqdm(example_batches, desc="Retrieving Data from Memory", disable=not verbose):
            # local adaptation for self.base_model of retrieved examples from memory.
            keys = self.memroy_module.encode_examples(example_batch)
            # KNN query over the past keys for the local-adaptation examples.
            retrieved_examples = self.memroy_module.query_examples(
                keys, past_memory_keys, k=self.debugger_args.inference_query_size)
            replay_data_loader, _ = self.get_dataloader(
                self.data_args, retrieved_examples, mode="train")
            adapt_dataloaders.append(replay_data_loader)
        return adapt_dataloaders

    def base_model_infer_with_adaptation(self, eval_dataloader, adapt_dataloaders, verbose=False):
        """Thin wrapper: unwrap DataParallel if needed and run adapted inference."""
        self.base_model.eval()
        model = self.base_model if self.n_gpu == 1 else self.base_model.module
        predictions = self.inference_with_adaptation(model, eval_dataloader, adapt_dataloaders, save_predictions=False,
                                                     verbose=verbose, logger=self.logger, return_all=False, predictions_only=True, args=Namespace(quiet=True))
        return predictions

    def inference_with_adaptation(self, model, dev_data, adapt_dataloaders, save_predictions=False, verbose=False, args=None, logger=None, return_all=False, predictions_only=False):
        """Generate predictions batch by batch, locally adapting a fresh model copy per batch."""
        # model.eval()
        predictions = []
        bos_token_id = dev_data.tokenizer.bos_token_id
        loss = []  # if needed
        if args:
            quiet = args.quiet
        else:
            quiet = False
        if not quiet:
            logger.info("Starting inference ...")
        current_index = 0
        for batch in tqdm(dev_data.dataloader, desc="Inference", disable=not verbose):
            ### Local Adaptation: Start ###
            # Deep-copy so per-batch adaptation never mutates the shared base model.
            _model = copy.deepcopy(model)
            adapt_dataloader = adapt_dataloaders[current_index]
            if adapt_dataloader:
                # TODO: debug. deactivate this step? then it should be the same as ER.
                _model = local_adaptation(self, _model, adapt_dataloader)
                pass
            ### Local Adaptation: End ###
            _model.eval()
            ### Inference: Start ###
            if torch.cuda.is_available():
                batch = [b.to(torch.device("cuda")) for b in batch]
            pad_token_id = dev_data.tokenizer.pad_token_id
            batch[0], batch[1] = trim_batch(batch[0], pad_token_id, batch[1])
            outputs = _model.generate(input_ids=batch[0],
                                      attention_mask=batch[1],
                                      num_beams=dev_data.args.num_beams,
                                      max_length=dev_data.args.max_output_length,
                                      decoder_start_token_id=_model.config.bos_token_id,
                                      early_stopping=dev_data.gen_early_stop,)
            for input_, output in zip(batch[0], outputs):
                pred = dev_data.decode(output)
                predictions.append(pred)
            ### Inference: End ###
            current_index += 1
            del _model  # free the adapted copy before the next batch
        if not quiet:
            logger.info("Starting inference ... Done")
        if predictions_only:
            return predictions
        if save_predictions:
            dev_data.save_predictions(predictions, )
        result = dev_data.evaluate(predictions, verbose=verbose)
        if return_all:
            return predictions, result, loss
        return result
|
CMR-main
|
cmr/debug_algs/cl_mbcl_alg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import copy
import os
import pickle
import random

import more_itertools
import numpy as np
import torch
import transformers
from tqdm import tqdm
from transformers.optimization import AdamW, get_linear_schedule_with_warmup

from cmr.models.mybart import MyBart
from cmr.models import run_bart
from cmr.models.utils import (convert_model_to_single_gpu,
                              freeze_embeds, trim_batch)
def get_virtual_updated_model(cl_trainer, query_data_loader):
    """Return a copy of the current base model after a virtual update on the query batch.

    The base model itself is left untouched; the adaptation happens on a deep copy
    with the L2 pull-back regularizer disabled (diff_loss_weight=0).
    """
    model_copy = copy.deepcopy(cl_trainer.base_model)
    adapt_args = copy.deepcopy(cl_trainer.data_args)
    adapt_args.train_batch_size = 4  # smaller batch size for the virtual adaptation pass
    # Rebuild the loader under the adjusted batch size (also fixes the example order).
    query_data_loader, _ = cl_trainer.get_dataloader(adapt_args, query_data_loader.data, mode="train")
    updated_model = local_adaptation(cl_trainer, model_copy, query_data_loader, diff_loss_weight=0)
    del model_copy
    return updated_model
def get_top_interfered_examples(cl_trainer, K, candidate_examples, query_data_loader):
    """Select the K most "interfered" candidates for MIR replay.

    1) Virtually update a copy of the current model on the query examples.
    2) Score each candidate by its loss before vs. after the virtual update.
    3) Return the top-K candidates with the largest score (ablation flags in
       ``cl_trainer.debugger_args.mir_abalation_args`` alter the scoring).

    Args:
        cl_trainer: continual-learning method object (provides model, args, loaders, logger).
        K: number of examples to return.
        candidate_examples: (input, answers, id) tuples sampled from memory.
        query_data_loader: loader over the query batch used for the virtual update.

    Returns:
        List of up to K candidate examples ranked by interference score.
    """
    cl_trainer.logger.info(
        f"get_top_interfered_examples: len(candidate_examples)={len(candidate_examples)};")

    if cl_trainer.debugger_args.mir_abalation_args == "random":
        # Ablation: ignore interference entirely and sample uniformly.
        cl_trainer.logger.info(f"ablation mode: randomly sample {K} examples from the candidate_examples")
        random.shuffle(candidate_examples)
        return candidate_examples[:K]

    ##################### Prepare the candidate examples as Memory Buffer #####################
    mlr_data_args = copy.deepcopy(cl_trainer.data_args)
    mlr_data_args.predict_batch_size = 8  # per-example losses are computed in small batches
    # Keep only one gold answer per example so the loss is computed consistently.
    # TODO: give the same random seed for selecting the same answer (if there are multiple answers)
    candidate_examples_single_ans = _keep_first_answer(candidate_examples)
    memory_buffer_loader, _ = cl_trainer.get_dataloader(
        mlr_data_args, candidate_examples_single_ans, mode="train", is_training=False)  # fix of the order
    ##################### End #####################

    before_model = copy.deepcopy(cl_trainer.base_model)
    before_losses = run_bart.inference(
        before_model, memory_buffer_loader, compute_loss=True, loss_only=True, logger=cl_trainer.logger)

    if cl_trainer.debugger_args.mir_abalation_args == "largest_beforeloss":
        # Ablation: rank by the current loss only; no virtual update is needed.
        # Bug fix: bind `after_model` so the cleanup `del` below cannot raise
        # NameError in this branch (previously it was left undefined).
        after_model = None
        after_losses = before_losses
    else:
        # Virtual update: adapt a copy of the model on the query batch (no L2 pull-back).
        virtual_adapt_args = copy.deepcopy(cl_trainer.data_args)
        virtual_adapt_args.train_batch_size = 4
        # change the batch size for the training.
        query_data_loader, _ = cl_trainer.get_dataloader(virtual_adapt_args, query_data_loader.data, mode="train")  # fix of the order
        after_model = local_adaptation(cl_trainer, before_model, query_data_loader, diff_loss_weight=0)
        after_losses = run_bart.inference(
            after_model, memory_buffer_loader, compute_loss=True, loss_only=True, logger=cl_trainer.logger)

    assert len(before_losses) == len(after_losses) == len(candidate_examples)

    interference_scores = []
    for example, before_loss, after_loss in zip(candidate_examples, before_losses, after_losses):
        if cl_trainer.debugger_args.mir_abalation_args == "largest_afterloss":
            loss_delta = after_loss  # only for debugging MIR; biggest losers afterwards
        elif cl_trainer.debugger_args.mir_abalation_args == "largest_beforeloss":
            loss_delta = before_loss
        else:
            # standard MIR: the increase in loss caused by the virtual update
            loss_delta = after_loss - before_loss
        interference_scores.append((example, loss_delta))
    interference_scores.sort(key=lambda x: x[1], reverse=True)
    if cl_trainer.debugger_args.mir_abalation_args == "reverse":
        interference_scores.reverse()  # only for debugging MIR; pick the LEAST interfered
    top_K_examples = [x[0] for x in interference_scores][:K]

    # Free the temporary models/buffers eagerly; they can be large.
    del before_model
    del before_losses
    del after_model
    del after_losses
    del memory_buffer_loader
    return top_K_examples
def local_adaptation(cl_trainer, model, adapt_dataloader, diff_loss_weight=1e-3):
    """Fine-tune `model` on `adapt_dataloader` with an L2 pull-back toward the base model.

    Used for MbPA-style local adaptation at inference time and for MIR's virtual
    update (with ``diff_loss_weight=0``, i.e. plain fine-tuning).

    Args:
        cl_trainer: method object providing tokenizer, debugger_args and the base model.
        model: the model to adapt (modified in place and returned).
        adapt_dataloader: loader over the adaptation examples (train mode).
        diff_loss_weight: weight of the squared L2 distance between `model` and
            ``cl_trainer.base_model``; 0 disables the regularizer.

    Returns:
        The adapted model.
    """
    pad_token_id = cl_trainer.tokenizer.pad_token_id
    base_weights = list(cl_trainer.base_model.parameters())
    curr_weights = list(model.parameters())
    global_step = 0
    model.train()

    # Standard HF-style parameter grouping: no weight decay for bias/LayerNorm.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(
            nd in n for nd in no_decay)], 'weight_decay': cl_trainer.debugger_args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(
            nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=cl_trainer.debugger_args.local_adapt_lr, eps=cl_trainer.debugger_args.adam_epsilon)
    # TODO: double check the decision about warm up for fine-tuning
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=cl_trainer.debugger_args.warmup_steps,
                                                num_training_steps=cl_trainer.debugger_args.total_steps)

    for epoch_id in range(int(cl_trainer.debugger_args.num_adapt_epochs)):
        for batch in tqdm(adapt_dataloader.dataloader, desc=f"Local Adaptation Epoch {epoch_id}", disable=False):
            global_step += 1
            if cl_trainer.use_cuda:
                batch = [b.to(torch.device("cuda")) for b in batch]
            # Trim padding for both encoder and decoder sides.
            batch[0], batch[1] = trim_batch(
                batch[0], pad_token_id, batch[1])
            batch[2], batch[3] = trim_batch(
                batch[2], pad_token_id, batch[3])
            # this is the task loss w/o any regularization
            loss = model(input_ids=batch[0], attention_mask=batch[1],
                         decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
                         is_training=True)
            if cl_trainer.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu.
            if diff_loss_weight != 0:
                diff_loss = torch.Tensor([0]).to("cuda" if torch.cuda.is_available() else "cpu")
                # Accumulate the squared L2 distance between the adapted and base weights.
                for base_param, curr_param in zip(base_weights, curr_weights):
                    diff_loss += (curr_param - base_param).pow(2).sum()
                loss = loss + diff_loss_weight * diff_loss
            loss.backward()
            if global_step % cl_trainer.debugger_args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(
                    model.parameters(), cl_trainer.debugger_args.max_grad_norm)
                optimizer.step()  # We have accumulated enough gradients
                scheduler.step()
                model.zero_grad()
    return model
def _keep_first_answer(examples_with_multiple_ans):
examples_with_single_ans = []
for item in examples_with_multiple_ans:
examples_with_single_ans.append((item[0], item[1][0:1], item[2]))
return examples_with_single_ans
class KeyValueMemoryModule(object):
    """Key-value episodic memory for MbPA-style retrieval.

    Keys are encoder hidden states (first-token position) of the question text,
    stored as raw bytes so they are hashable dict keys; values are the example
    tuples with the store-time timecode appended.
    """

    def __init__(self, logger):
        self.logger = logger
        self.memory = {}  # key bytes -> (input, answers, id, timecode)
        self.keys_over_time = {}
        self.memory_key_cache = {}  # key text -> precomputed key vector (optional cache)
        self.memory_key_encoder = ""  # HF model name of the key encoder

    def load_key_encoder(self, memory_key_encoder='facebook/bart-base'):
        """Load the HF encoder + tokenizer used to embed memory keys (moved to CUDA).

        NOTE(review): an encoder name matching none of the branches below leaves
        ``self.key_encoder``/``self.tokenizer`` unset, so the ``cuda()`` call would
        raise -- confirm callers only pass bart/distilbert/roberta/bert names.
        """
        # https://huggingface.co/transformers/model_doc/bart.html#bartmodel
        # TODO: consider the SentenceBERT-like sentence encoders.
        self.memory_key_encoder = memory_key_encoder
        self.logger.info(
            f"Starting to load the key encoder ({memory_key_encoder}) for the memory module.")
        if "bart" in memory_key_encoder.lower():
            self.tokenizer = transformers.BartTokenizer.from_pretrained(memory_key_encoder)
            self.key_encoder = transformers.BartModel.from_pretrained(memory_key_encoder)
        elif "distilbert" in memory_key_encoder.lower():
            self.tokenizer = transformers.DistilBertTokenizer.from_pretrained(memory_key_encoder)
            self.key_encoder = transformers.DistilBertModel.from_pretrained(memory_key_encoder)
        elif "roberta" in memory_key_encoder.lower():
            self.key_encoder = transformers.RobertaModel.from_pretrained(memory_key_encoder)
            self.tokenizer = transformers.RobertaTokenizer.from_pretrained(memory_key_encoder)
        elif "bert" in memory_key_encoder.lower():
            self.key_encoder = transformers.BertModel.from_pretrained(memory_key_encoder)
            self.tokenizer = transformers.BertTokenizer.from_pretrained(memory_key_encoder)
        self.key_encoder.cuda()
        self.logger.info(f"Finished.")
        return self.key_encoder, self.tokenizer

    def get_key_content(self, inputs):
        """Extract the text after 'Question: ' from each input string (the key text).

        Raises ValueError (from str.index) if an input lacks the marker.
        """
        key_texts = []
        trigger_str = "Question: "
        for _input in inputs:
            start_ind = _input.index(trigger_str) + len(trigger_str)
            key_texts.append(_input[start_ind:])
        return key_texts

    def load_memory_key_cache(self, init_memory_cache_path):
        """Load the precomputed key-vector cache for the current encoder name, if the file exists."""
        if os.path.exists(init_memory_cache_path):
            self.logger.info(f"Loading init_memory_cache_path from {init_memory_cache_path}")
            with open(init_memory_cache_path, "rb") as f:
                self.memory_key_cache = pickle.load(f)[self.memory_key_encoder]
        else:
            self.logger.info(f"Initializing an empty memory key cache.")
            self.memory_key_cache = None  # None => keys are encoded on the fly later

    def encode_examples_for_caching(self, all_examples, batch_size=1, return_tensors=False):
        """
        Return key representation of the documents
        """
        # Freeze the weights of the key network to prevent key
        # representations from drifting as data distribution changes
        # with torch.no_grad():
        #     last_hidden_states, _ = self.key_encoder(contents, attention_mask=attn_masks)
        # Obtain key representation of every text content by selecting its first-position hidden state
        # keys = last_hidden_states[:, 0, :]
        all_vectors = {}   # key text -> numpy vector
        all_tensors = []   # flat list of torch vectors (if return_tensors)
        batches = list(more_itertools.chunked(all_examples, batch_size))
        for examples in tqdm(batches, desc="Caching the examples"):
            inputs = [d[0] for d in examples]
            with torch.no_grad():
                # only use the questions as the key text for encoding.
                key_texts = self.get_key_content(inputs)
                inputs = self.tokenizer.batch_encode_plus(
                    key_texts, return_tensors="pt", pad_to_max_length=True)
                input_ids = inputs["input_ids"].to(torch.device("cuda"))
                attention_mask = inputs["attention_mask"].to(torch.device("cuda"))
                # last_hidden_states, _ = self.key_encoder(**inputs)
                results = self.key_encoder(input_ids, attention_mask)
                last_hidden_states = results[0]
                # First-token position of the last hidden layer is used as the key.
                key_vectors = last_hidden_states[:, 0, :]
                key_vectors_npy = key_vectors.cpu().numpy()
            all_tensors += list(key_vectors)
            for key_text, key_vector in zip(key_texts, key_vectors_npy):
                all_vectors[key_text] = key_vector
        if return_tensors:
            return all_tensors
        return all_vectors

    def encode_examples(self, examples, use_random_keys=False):
        """
        Return key representation of the documents
        """
        inputs = [d[0] for d in examples]
        # only use the questions as the key text for encoding.
        key_texts = self.get_key_content(inputs)
        key_vectors = None
        if use_random_keys:
            # Random keys make retrieval equivalent to uniform sampling (ER/MIR ablation).
            self.logger.info("Using randomly generated memory keys for ER and MIR.")
            key_vectors = np.random.rand(len(examples), 128)
            return key_vectors
        if self.memory_key_cache:
            # Cache hit path: every key text must be present in the cache.
            key_vectors = []
            for key_text in key_texts:
                assert key_text in self.memory_key_cache, key_text
                key_vectors.append(self.memory_key_cache[key_text])
        else:
            # on the fly
            with torch.no_grad():
                inputs = self.tokenizer.batch_encode_plus(
                    key_texts, return_tensors="pt", pad_to_max_length=True)
                input_ids = inputs["input_ids"].to(torch.device("cuda"))
                attention_mask = inputs["attention_mask"].to(torch.device("cuda"))
                # last_hidden_states, _ = self.key_encoder(**inputs)
                results = self.key_encoder(input_ids, attention_mask)
                last_hidden_states = results[0]
                key_vectors = last_hidden_states[:, 0, :]
                key_vectors = key_vectors.cpu().numpy()
        return key_vectors

    def store_examples(self, keys, examples, timecode=0):
        """
        Add the examples as key-value pairs to the memory dictionary with content,attention_mask,label tuple as value
        and key determined by key network
        """
        assert len(keys) == len(examples)
        # update the memory dictionary
        for i, key in enumerate(keys):
            # numpy array cannot be used as key since it is non-hashable, hence convert it to bytes to use as key.
            values = list(examples[i])
            values.append(timecode)  # record WHEN the example was stored
            self.memory.update({key.tobytes(): tuple(values)})

    def query_examples(self, keys, past_memory_keys, k=32):
        """
        Returns samples from buffer using K-nearest neighbour approach
        """
        retrieved_examples = []
        # Iterate over all the input keys
        # to find neigbours for each of them
        k = min(k, len(past_memory_keys))
        for key in keys:
            # Similarity is the dot product between the query key and all stored keys
            # (NOTE: not Euclidean distance, despite the method's KNN framing).
            similarity_scores = np.dot(past_memory_keys, key.T)
            # argpartition gives the k highest-scoring keys in O(n).
            K_neighbour_keys = past_memory_keys[np.argpartition(similarity_scores, -k)[-k:]]
            neighbours = [self.memory[nkey.tobytes()] for nkey in K_neighbour_keys]
            # converts experiences into batch
            # retrieved_examples.append(neighbours)
            retrieved_examples += neighbours
        return retrieved_examples

    def random_sample(self, sample_size):
        """Uniformly sample up to `sample_size` stored examples (timecode dropped)."""
        sample_size = min(len(self.memory), sample_size)
        keys = random.sample(list(self.memory), sample_size)
        _inputs = [self.memory[k][0] for k in keys]
        _outputs = [self.memory[k][1] for k in keys]
        _ids = [self.memory[k][2] for k in keys]
        # _timecodes = [self.memory[k][3] for k in keys]
        examples = list(zip(_inputs, _outputs, _ids))
        return examples

    def save_memory_to_path(self, memory_path):
        """Pickle the whole memory dict to disk."""
        if self.memory is not None:
            with open(memory_path, "wb") as f:
                self.logger.info(f"Saving the memory to {memory_path}")
                pickle.dump(self.memory, f)

    def load_memory_from_path(self, memory_path):
        """Load a pickled memory dict and rebuild the float32 key matrix."""
        if os.path.exists(memory_path):
            with open(memory_path, "rb") as f:
                self.logger.info(f"Loading the memory from {memory_path}")
                self.memory = pickle.load(f)
                total_keys = len(self.memory.keys())
                # convert the keys from np.bytes to np.float32
                self.all_keys = np.frombuffer(
                    np.asarray(list(self.memory.keys())), dtype=np.float32).reshape(total_keys, -1)
        else:
            self.logger.info(f"Warning: {memory_path} doesn't exist.")
def KVMemory_init():
    """Script entry: precompute and cache key vectors for all bug + pass examples.

    Builds (or updates) the pickle at ``--init_memory_cache_path``, keyed by the
    key-encoder name, so later runs can look keys up instead of re-encoding.
    """
    from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
    import argparse
    import json
    import logging
    parser = argparse.ArgumentParser()
    parser.add_argument('--memory_key_encoder', type=str, default="facebook/bart-base")
    parser.add_argument('--init_memory_cache_path', type=str,
                        default="bug_data/memory_key_cache.pkl")
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument("--bug_stream_json_path",
                        default="bug_data/mrqa_naturalquestions_dev.static_bug_stream.json")
    parser.add_argument("--upstream_eval_data",
                        default="bug_data/mrqa_naturalquestions_dev.sampled_pass.jsonl")
    parser.add_argument("--sampled_upstream_json_path",
                        default="bug_data/mrqa_naturalquestions.sampled_upstream.jsonl")
    args = parser.parse_args()

    # Log to both a per-encoder file and the console.
    log_filename = f'logs/memory_cache_building_{args.memory_key_encoder.replace("/", "_")}.log'
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO,
                        handlers=[logging.FileHandler(log_filename),
                                  logging.StreamHandler()])
    logger = logging.getLogger(__name__)
    logger.info(args)

    cl_trainer = ContinualFinetuning(logger)
    # Load bugs
    with open(args.bug_stream_json_path) as f:
        bug_stream = json.load(f)
    all_examples = []
    for bug_batch in tqdm(bug_stream, desc="Creating the bug data loaders."):
        formatted_bug_batch = cl_trainer.data_formatter(bug_batch)
        all_examples += formatted_bug_batch
    # Load pass cases
    with open(args.upstream_eval_data) as f:
        # set() de-duplicates identical jsonl lines before parsing.
        pass_examples = [json.loads(line) for line in set(f.read().splitlines())]
    all_examples += cl_trainer.data_formatter(pass_examples)

    memory_module = KeyValueMemoryModule(logger)
    logger.info(f"All examples: {len(all_examples)}")
    memory_module.load_key_encoder(memory_key_encoder=args.memory_key_encoder)
    all_key_vectors = memory_module.encode_examples_for_caching(
        all_examples, batch_size=args.batch_size)
    logger.info(
        f"all_key_vectors.shape: {len(all_key_vectors)} x {len(all_key_vectors[list(all_key_vectors.keys())[0]])}")

    # Merge into the existing cache file (if any) under this encoder's name.
    if os.path.exists(args.init_memory_cache_path):
        with open(args.init_memory_cache_path, "rb") as f:
            memory_key_cache = pickle.load(f)
    else:
        memory_key_cache = {}
    memory_key_cache[args.memory_key_encoder] = all_key_vectors
    with open(args.init_memory_cache_path, "wb") as f:
        pickle.dump(memory_key_cache, f)
        logger.info(f"Saved the cache to {f.name}")
|
CMR-main
|
cmr/debug_algs/cl_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import copy
import logging
import random
from cmr.debug_algs.cl_utils import _keep_first_answer
from cmr.models import run_bart
from cmr.task_manager.eval_metrics import evaluate_func
import torch
from transformers import BartTokenizer, BartConfig
import json
from tqdm import tqdm
import os
import numpy as np
import wandb
def _pack_as_dict(predictions, results, results_all):
return {"predictions": predictions, "metric_results": results, "metric_results_detailed": results_all}
class OnlineDebuggingMethod():
    def __init__(self, logger=None):
        """Shared state for all online-debugging (continual-learning) methods.

        Subclasses fill in the args/modules via load_data() / debugger_setup().
        NOTE(review): instantiating this class downloads/loads the "bart-large"
        tokenizer -- confirm that is intended for every subclass.
        """
        self.name = "base_class"
        # logger
        self.logger = logger
        # args (set later by the setup methods)
        self.debugger_args = None
        self.base_model_args = None
        self.data_args = None
        # modules
        self.base_model = None
        self.debugger = None
        # data
        self.num_bug_batches = None
        self.bug_batch_size = None
        self.submission_eval_loaders = []  # for online dynamic streams
        self.upstream_eval_loader = None  # for UKR (upstream knowledge retention)
        self.heldout_submission_eval_loader = None  # for KG (knowledge generalization) eval
        # utils
        self.use_cuda = torch.cuda.is_available()
        self.tokenizer = BartTokenizer.from_pretrained("bart-large")
        self.timecode = None  # index of the current step in the stream
        self.metric = "EM|QA-F1"
        # for dynamic stream mode
        self.data_eval_loaders = []
        self.online_eval_results = []
        # cached last-seen retention/generalization scores
        self.last_OKR = None; self.last_UKR = None; self.last_KG = None
        if self.use_cuda:
            self.n_gpu = torch.cuda.device_count()
        else:
            self.n_gpu = 0
        self.model_update_steps = 0  # number of updates over the base model.
        self.past_errors = []
        self.past_submissions = []
        return
def save_result_file(self):
output_info = {}
output_info["method_class"] = self.name
output_info["base_model_args"] = str(self.base_model_args)
output_info["debugger_args"] = str(self.debugger_args)
output_info["data_args"] = str(self.data_args)
output_info["model_update_steps"] = self.model_update_steps
output_info["online_eval_results"] = self.online_eval_results
# if args.cl_method_name in ["offline_debug"]:
# output_info["offline_bound_results"] = offline_bound_results
# logger.info(f"eval_results_overall_bug: {offline_bound_results['eval_results_overall_bug']['metric_results']}")
# logger.info(f"eval_results_overall_forget: {offline_bound_results['eval_results_overall_forget']['metric_results']}")
with open(self.data_args.result_file, "w") as f:
json.dump(output_info, f)
self.logger.info(f"Updated result file: {self.data_args.result_file} at Timecode: {self.timecode}.")
def _check_data_args(self, additional_args=[]):
required_atts = ["submission_stream_data",
"stream_id",
"upstream_eval_data",
"heldout_submission_data",
"do_lowercase",
"append_another_bos",
"max_input_length",
"max_output_length",
"task_name",
"num_beams",
"max_timecode",
"result_file"] + additional_args
assert all([hasattr(self.data_args, att) for att in required_atts])
return
def load_data(self, data_args, given_data_stream=None):
""""For loading the data stream for dynamic building the errors."""
self.data_args = data_args
self._check_data_args() # additional_args=["data_stream_json_path", "accumulate_eval_freq"]
# Load bug stream
if given_data_stream:
data_stream = given_data_stream
else:
with open(data_args.submission_stream_data) as f:
data_stream = json.load(f)[data_args.stream_id]
self.logger.info(f"Loading the stream from {f.name} and use the ${data_args.stream_id} part.")
self.data_stream = data_stream
self.num_data_batches = len(data_stream)
self.data_batch_size = len(data_stream[0])
# Create data loaders for each error batch.
all_formatted_data = []
self.data_eval_loaders = []
self.online_eval_results = []
for data_batch in tqdm(self.data_stream, desc="Creating the data loaders."):
if data_args.max_timecode > 0 and len(self.data_eval_loaders) >= data_args.max_timecode:
break
formatted_data_batch = self.data_formatter(data_batch)
all_formatted_data += formatted_data_batch
_, eval_data_dataloader = self.get_dataloader(
data_args, formatted_data_batch, mode="eval")
self.data_eval_loaders.append(eval_data_dataloader)
self.all_formatted_data = all_formatted_data
# Create loaders for the sampled pass examples for evaluation.
with open(data_args.upstream_eval_data) as f:
upstream_eval_examples = [json.loads(line) for line in f.read().splitlines()]
upstream_eval_examples = self.data_formatter(upstream_eval_examples)
self.logger.info(f"load_data: len(upstream_eval_examples)={len(upstream_eval_examples)}")
_, self.upstream_eval_loader = self.get_dataloader(
data_args, upstream_eval_examples, mode="eval")
# Create loaders for the sampled pass examples for evaluation.
with open(data_args.heldout_submission_data) as f:
heldout_eval_examples = [json.loads(line) for line in f.read().splitlines()]
heldout_eval_examples = self.data_formatter(heldout_eval_examples)
self.logger.info(f"load_data: len(heldout_eval_examples)={len(heldout_eval_examples)}")
_, self.heldout_submission_eval_loader = self.get_dataloader(
data_args, heldout_eval_examples, mode="eval")
def _get_dynamic_errors(self, data_eval_loader, result_dict, return_raw_bug_examples=False):
############### Get the errors dynamically. ###############
self.logger.info(
f"Evaluating to get errors .... Timecode: {self.timecode}")
self.past_submissions += data_eval_loader.data
predictions, results, results_all = self.evaluate(data_eval_loader)
self.logger.info(f"Before Error Fixing: {results}")
# self.logger.info(
# f"Doing-Nothing Instant EM: {self.instant_doing_nothing_EM[self.timecode]}")
### Pack the error examples for training. ###
errors = []
error_ids = []
for (_input, _truth, _id), prediction, em, f1 in zip(data_eval_loader.data,
predictions,
results_all["EM"],
results_all["QA-F1"]):
# self.logger.info(f"{example}")
# self.logger.info(f"{prediction}")
# self.logger.info(f"{em}")
if em == 0: # TODO: this is the condition to judge if it is a bug.
bug = {}
bug["id"] = _id
bug["input"] = _input
bug["truth"] = _truth
bug["mistake"] = prediction
errors.append(bug)
error_ids.append(_id)
self.past_errors.append(bug)
formatted_bug_batch = self.data_formatter(errors)
self.logger.info(f"Found {len(formatted_bug_batch)} errors.")
SR = 1 - len(error_ids)/len(predictions)
CSR = 1 - len(self.past_errors) / len(self.past_submissions)
wandb.log({"num_errors": len(formatted_bug_batch)}, step=self.timecode)
wandb.log({"CSR": CSR}, step=self.timecode)
wandb.log({"SR": SR}, step=self.timecode)
result_dict["before_eval_results"] = _pack_as_dict(predictions, results, results_all)
result_dict["before_error_ids"] = error_ids
result_dict["SR"] = SR
result_dict["CSR"] = CSR
if return_raw_bug_examples:
return formatted_bug_batch
else:
bug_train_loader, bug_eval_loader = self.get_dataloader(
self.data_args, formatted_bug_batch, mode="both")
return bug_train_loader, bug_eval_loader
def _update_result_dict(self, result_dict):
# if self.last_OKR is None or self.last_KG is None or self.last_UKR is None:
# pass
# else:
scores = [result_dict.get("CSR", 0.0), result_dict.get("EFR", 0.0)]
if self.last_OKR:
scores.append(self.last_OKR)
scores.append(self.last_UKR)
scores.append(self.last_KG)
result_dict["Overall"] = float(np.mean(scores))
wandb.log({"Overall": result_dict["Overall"]}, step=self.timecode)
self.logger.info(f'Overall: {result_dict["Overall"]} from scores={scores}')
self.online_eval_results.append(result_dict)
def online_debug(self):
self.logger.info("Start Online Debugging with Dynamic Error Mode")
self.logger.info(f"Number of Batches of Data: {self.num_data_batches}")
self.logger.info(f"Data Batch Size: {self.data_batch_size};")
self.timecode = 0
if self.debugger_args.save_ckpt_freq:
# save the initial model as the 0-th model.
self._save_base_model()
for data_eval_loader in tqdm(self.data_eval_loaders, desc="Online Debugging"):
result_dict = {"timecode": self.timecode} # start with 0
self.eval_knowledge_retention(result_dict)
self.eval_knowledge_generalization(result_dict)
# self._replay_based_eval(result_dict)
bug_train_loader, bug_eval_loader = self._get_dynamic_errors(data_eval_loader, result_dict)
############### CORE ###############
# Fix the bugs by mini-batch based "training"
self.logger.info(f"Start error-fixing .... Timecode: {self.timecode}")
self.fix_bugs(bug_train_loader) # for debugging
self.logger.info("Start error-fixing .... Done!")
############### CORE ###############
self.evaluate_error_fixing(result_dict, bug_eval_loader)
self._update_result_dict(result_dict)
if self.debugger_args.save_ckpt_freq > 0 and self.timecode % self.debugger_args.save_ckpt_freq == 0:
self._save_base_model()
self.save_result_file()
self.logger.info("-"*50)
self.timecode += 1
#### Final evaluation ####
self.final_evaluation()
#### Save the final model ####
self._save_base_model()
def final_evaluation(self):
self.logger.info("Start the final evaluation.")
# TODO:
self.logger.info("Nothing here.")
def eval_knowledge_retention(self, result_dict):
if self.timecode == self.data_args.max_timecode-1:
pass
elif self.timecode % self.debugger_args.kr_eval_freq == 0:
pass
else:
return
######################## UKR ########################
self.logger.info(f"Start eval_knowledge_retention for UKR @ Timecode={self.timecode}")
if self.debugger_args.kr_eval_mode == "loss":
UKR_loss = self.evaluate(self.upstream_eval_loader, mode="loss")
elif self.debugger_args.kr_eval_mode == "metric":
predictions, results, results_all = self.evaluate(self.upstream_eval_loader)
scores = results_all["EM"]
UKR = len([1 for s in scores if s == 1]) / len(scores)
result_dict["UKR"] = UKR
wandb.log({"UKR": UKR}, step=self.timecode)
self.last_UKR = UKR
# UKR_loss = self.evaluate(self.upstream_eval_loader, mode="loss")
# wandb.log({"UKR_loss": UKR_loss}, step=self.timecode)
self.logger.info(f"Upstream Knowledge Retation (UKR@{self.timecode}): {UKR:.4f}")
######################## OKR ########################
if not self.past_submissions:
return
rng = random.Random(self.debugger_args.okr_sample_seed) # fixed for different methods e.g., 1337
if len(self.past_submissions) < self.debugger_args.okr_sample_size:
self.logger.info(f"len(self.past_submissions) = {len(self.past_submissions)} \
< self.debugger_args.okr_sample_size = {self.debugger_args.okr_sample_size}")
return
sampled_past_submissions = rng.sample(self.past_submissions, k=self.debugger_args.okr_sample_size)
result_dict["OKR_sampled_ids"] = [_id for _input, _truth, _id in sampled_past_submissions]
result_dict["OKR_sampled_ids"].sort()
_, past_submission_eval_loader = self.get_dataloader(self.data_args, sampled_past_submissions, mode="eval")
self.logger.info(f"Start eval_knowledge_retention for OKR @ Timecode={self.timecode}")
if self.debugger_args.kr_eval_mode == "loss":
OKR = self.evaluate(past_submission_eval_loader, mode="loss")
elif self.debugger_args.kr_eval_mode == "metric":
predictions, results, results_all = self.evaluate(past_submission_eval_loader)
scores = results_all["EM"]
OKR = len([1 for s in scores if s == 1]) / len(scores)
self.logger.info(f"Online Knowledge Retation (OKR@{self.timecode}): {OKR:.4f}")
result_dict["OKR"] = OKR
self.last_OKR = OKR
wandb.log({"OKR": OKR}, step=self.timecode)
def eval_knowledge_generalization(self, result_dict):
if self.timecode == self.data_args.max_timecode-1:
pass
elif self.timecode % self.debugger_args.kg_eval_freq == 0:
pass
else:
return
######################## KG ########################
self.logger.info(f"Start eval_knowledge_generalization for KG @ Timecode={self.timecode}")
if self.debugger_args.kg_eval_mode == "loss":
KG_loss = self.evaluate(self.heldout_submission_eval_loader, mode="loss")
elif self.debugger_args.kg_eval_mode == "metric":
# TODO: get a decomposed version?
predictions, results, results_all = self.evaluate(self.heldout_submission_eval_loader)
scores = results_all["EM"]
KG = len([1 for s in scores if s == 1]) / len(scores)
result_dict["KG"] = KG
wandb.log({"KG": KG}, step=self.timecode)
self.last_KG = KG
self.logger.info(f"Future Knowledge Generalization (KG@{self.timecode}): {KG:.4f}")
def evaluate_error_fixing(self, result_dict, bug_eval_loader):
after_predictions, after_results, after_results_all = self.evaluate(bug_eval_loader)
fixed_ids = []
unfixed_ids = []
for (_input, _truth, _id), score_after in zip(bug_eval_loader.data, after_results_all["EM"]):
if score_after == 1:
fixed_ids.append(_id)
else:
unfixed_ids.append(_id)
EFR = len(fixed_ids) / len(fixed_ids+unfixed_ids)
result_dict["EFR"] = EFR
wandb.log({"EFR": EFR}, step=self.timecode)
self.logger.info(f"EFR={EFR}")
return EFR
# So the 0-th checkpoint should be the original base model.
def _save_base_model(self, ckpt_name=None):
output_dir = self.debugger_args.ckpt_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
model_state_dict = {k: v.cpu() for (
k, v) in self.base_model.state_dict().items()}
if ckpt_name:
model_path = os.path.join(output_dir, f"model_ckpt_{ckpt_name}.pt")
else:
model_path = os.path.join(
output_dir, f"model_ckpt_{self.timecode:03d}.pt")
torch.save(model_state_dict, model_path)
self.logger.info(f"Model saved to {model_path}.")
def evaluate(self, eval_dataloader=None, verbose=False, mode="metric"):
"""Evaluates the performance"""
if not eval_dataloader:
self.logger.info("evaluate with submission eval loaders")
eval_dataloader = self.submission_eval_loaders[self.timecode]
if mode == "metric":
predictions = self.base_model_infer(eval_dataloader, verbose)
assert len(predictions) == len(eval_dataloader)
predictions = [p.strip() for p in predictions]
results, results_all = evaluate_func(
predictions, eval_dataloader.data, self.metric, return_all=True)
return predictions, results, results_all
elif mode == "loss":
examples = eval_dataloader.data
_examples = _keep_first_answer(examples)
tmp_data_args = copy.deepcopy(self.data_args)
tmp_data_args.predict_batch_size = 8 # TODO: set an arg.
eval_loader, _ = self.get_dataloader(tmp_data_args, _examples, mode="train", is_training=False) # fix of the order
losses = run_bart.inference(
self.base_model, eval_loader, compute_loss=True, loss_only=True, logger=self.logger)
mean_loss = sum(losses) / len(examples)
return mean_loss
def base_model_infer(self, eval_dataloader, verbose):
raise NotImplementedError(
"Please Implement the `base_model_infer` method in your class.")
def check_debugger_args(self):
raise NotImplementedError(
"Please Implement the `check_debugger_args` method in your class.")
def data_formatter(self, bug_batch):
raise NotImplementedError(
"Please Implement the `data_formatter` method in your class.")
def get_dataloader(self, data_args, formatted_bug_batch):
raise NotImplementedError(
"Please Implement the `get_dataloader` method in your class.")
def load_base_model(self, base_model_args, mode="online_debug"):
raise NotImplementedError(
"Please Implement the `load_base_model` method in your class.")
def debugger_setup(self):
raise NotImplementedError(
"Please Implement the `debugger_setup` method in your class.")
def fix_bugs(self, bug_loader, quiet=True):
raise NotImplementedError(
"Please Implement the `fix_bugs` method in your class.")
def upstream_data_formatter(self, examples):
# The continual fine-tuning method only uses the correct answers for fixing bugs.
formatted_examples = []
for example in examples:
_id = example["id"]
_input = example["input"]
_truth = example["output"] # a list of answers
formatted_examples.append((_input, _truth, _id))
return formatted_examples
|
CMR-main
|
cmr/debug_algs/commons.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# TODO: remove this as we have the offline evaluation function now.
def _eval_before_fixing(self):
# Before Bug-Fixing
assert self.online_debug_results is not None
bug_eval_loader = self.bug_eval_loaders[self.timecode]
bug_before_predictions, bug_before_results, bug_before_results_all = self.evaluate(
bug_eval_loader)
self.logger.info("-"*10+f"Timecode: {self.timecode}"+"-"*10)
self.logger.info(
f"Before Bug-fixing the results on bug-batch-{self.timecode} = {bug_before_results}")
if len(self.online_debug_results["res_on_passes"]) == 0:
pass_before_predictions, pass_before_results, pass_before_results_all = self.evaluate(
self.forget_eval_loader)
self.online_debug_results["res_on_passes"].append(
(pass_before_results, pass_before_results_all))
else:
pass_before_predictions = None # TODO:
pass_before_results, pass_before_results_all = self.online_debug_results[
"res_on_passes"][-1]
self.logger.info(
f"Before Bug-fixing the results on the sampled pass cases = {pass_before_results}")
return bug_before_results, bug_before_results_all, pass_before_results, pass_before_results_all
# TODO: remove this as we have the offline evaluation function now.
def _eval_after_fixing(self, bug_before_results, bug_before_results_all, pass_before_results, pass_before_results_all):
    """Evaluate the current bug batch and the sampled pass cases after fixing.

    Appends (before, after) result pairs to `self.online_debug_results`,
    records per-example fixed/forgotten ids via `_check_fixing` /
    `_check_forgetting`, and optionally re-evaluates the full bug set when
    `debugger_args.overtime_overall_bug_eval` is set.
    """
    # After Bug-Fixing
    assert self.online_debug_results is not None
    bug_eval_loader = self.bug_eval_loaders[self.timecode]
    bug_after_predictions, bug_after_results, bug_after_results_all = self.evaluate(
        bug_eval_loader)
    self.logger.info(
        f"After Bug-fixing the results on bug-batch-{self.timecode} = {bug_after_results}")
    pass_after_predictions, pass_after_results, pass_after_results_all = self.evaluate(
        self.forget_eval_loader)
    self.logger.info(
        f"After Bug-fixing the results on the sampled pass cases = {pass_after_results}")
    # Log the overall results
    self.online_debug_results["res_on_bugs"].append(
        (bug_before_results, bug_after_results))
    self.online_debug_results["res_on_passes"].append(
        (pass_after_results, pass_after_results_all))
    self._check_fixing(
        bug_eval_loader, bug_before_results_all, bug_after_results_all)
    self._check_forgetting(pass_before_results_all, pass_after_results_all)
    if self.debugger_args.overtime_overall_bug_eval:
        all_bug_after_predictions, all_bug_after_results, all_bug_after_results_all = self.evaluate(
            self.bug_all_eval_loader)
        self.logger.info(
            f"Current Overall Bug-fixing Results = {all_bug_after_results}")
        self.online_debug_results["overtime_all_bug_eval"].append(
            all_bug_after_results)
# TODO: remove this as we have the offline evaluation function now.
def _eval_overall_bugs(self):
all_bug_after_predictions, all_bug_after_results, all_bug_after_results_all = self.evaluate(
self.bug_all_eval_loader)
self.online_debug_results["final_all_bug_eval"] = all_bug_after_results
self.logger.info(
f"Final Overall Bug-fixing Results = {all_bug_after_results}")
# TODO: move to evaluation analysis part.
def _check_fixing(self, bug_eval_loader, bug_before_results_all, bug_after_results_all):
# Log the specific fixed bugs and forget examples
em_prefixed_bugs = []
f1_prefixed_bugs = []
em_fixed_bugs = []
f1_fixed_bugs = []
assert len(bug_eval_loader.data) == len(
bug_before_results_all["EM"]) == len(bug_after_results_all["EM"])
for ind in range(len(bug_eval_loader.data)):
em_before = bug_before_results_all["EM"][ind]
em_after = bug_after_results_all["EM"][ind]
f1_before = bug_before_results_all["QA-F1"][ind]
f1_after = bug_after_results_all["QA-F1"][ind]
uuid = bug_eval_loader.data[ind][2] # (input, output, uuid)
if em_before == 1:
em_prefixed_bugs.append(uuid)
if f1_after > 0.5:
f1_prefixed_bugs.append(uuid)
if em_before == 0 and em_after == 1:
em_fixed_bugs.append(uuid)
if f1_before < 0.5 and f1_after > 0.5 and f1_after-f1_before >= 0.25:
f1_fixed_bugs.append(uuid)
self.online_debug_results["em_fixed_bugs"].append(em_fixed_bugs)
self.online_debug_results["f1_fixed_bugs"].append(f1_fixed_bugs)
self.online_debug_results["em_prefixed_bugs"].append(em_prefixed_bugs)
self.online_debug_results["f1_prefixed_bugs"].append(f1_prefixed_bugs)
self.logger.info(
f"Number of em_prefixed_bugs = {len(em_prefixed_bugs)}; Number of f1_prefixed_bugs = {len(f1_prefixed_bugs)}")
self.logger.info(
f"Number of em_fixed_bugs = {len(em_fixed_bugs)}; Number of f1_fixed_bugs = {len(f1_fixed_bugs)}")
# TODO: move to evaluation analysis part.
def _check_forgetting(self, pass_before_results_all, pass_after_results_all):
# log the forgotten bugs
em_forgotten_passes = []
for ind in range(len(self.forget_eval_loader.data)):
em_before = pass_before_results_all["EM"][ind]
em_after = pass_after_results_all["EM"][ind]
# f1_before = pass_before_results_all["QA-F1"][ind]
# f1_after = pass_after_results_all["QA-F1"][ind]
uuid = self.forget_eval_loader.data[ind][2] # (input, output, uuid)
if em_before == 1 and em_after == 0:
em_forgotten_passes.append(uuid)
self.online_debug_results["forgotten_passes"].append(
em_forgotten_passes)
self.logger.info(
f"Number of em_forgotten_passes = {len(em_forgotten_passes)}.")
# self.logger.info(f"UUIDS of fixed bugs = {em_fixed_bugs}")
def evaluate_v1(self, eval_dataloader=None, verbose=False):
    """Evaluates the performance with memory-based local adaptation.

    Backs up the base model, locally adapts it on examples retrieved from the
    memory module for the given eval data, runs the usual inference, then
    restores the original model.

    NOTE(review): the inner call passes `eval_dataloader=None` (ignoring the
    argument received) — presumably unintended; also `super()` only works
    when this legacy function is placed back inside its original class.
    """
    # backup the base model.
    self.logger.info("Backing up the base model ...")
    base_model_backup = copy.deepcopy(self.base_model)
    self.logger.info("Backking up the base model ... Done!")
    self.logger.info("Memory Retrieving ...")
    # local adaptation for self.base_model of retrieved examples from memory.
    keys = self.memroy_module.encode_examples(eval_dataloader.data)
    retrieved_examples = self.memroy_module.query_examples(keys, k=self.debugger_args.replay_size)
    replay_data_loader, _ = self.get_dataloader(self.data_args, retrieved_examples, mode="train")
    self.logger.info("Memory Retrieving Done ...")
    self.logger.info("Temp local adaptation ...")
    self.fix_bugs(replay_data_loader)  # local adaptation
    self.logger.info("Temp local adaptation ... Done")
    # get inference as usual.
    predictions, results, return_all = super().evaluate(eval_dataloader=None, verbose=False)
    del self.base_model
    self.base_model = base_model_backup  # restore to the original base_model
    return predictions, results, return_all
# NOTE(review): this span appears to be an orphaned snippet — it references
# `self` and `result_dict`, neither of which is defined at this level; it
# looks cut from an `online_debug`-style loop body. Kept byte-identical.
### Check the accumulative results. ###
if (self.data_args.accumulate_eval_freq > 0 and (self.timecode + 1) % self.data_args.accumulate_eval_freq == 0):
    accumu_EM, forgotten_ids, fixed_ids, total_len = self.get_accumulative_results()
    result_dict["accumulative_EM"] = accumu_EM
    result_dict["accumulative_forgotten_ids"] = forgotten_ids
    result_dict["accumulative_fixed_ids"] = fixed_ids
    result_dict["accumulative_forgotten_rate"] = len(forgotten_ids) / total_len
    result_dict["accumulative_fixed_rate"] = len(fixed_ids) / total_len
    self.logger.info(" ")
    self.logger.info(
        f"Doing-Nothing Accumulative EM: {self.accumulate_doing_nothing_EM[self.timecode]}")
    self.logger.info(f"My Accumulative EM: {accumu_EM}")
    self.logger.info(
        f"accumulative_forgotten_rate: {result_dict['accumulative_forgotten_rate']}")
    self.logger.info(
        f"accumulative_fixed_rate: {result_dict['accumulative_fixed_rate']}")
def get_accumulative_results(self):
    """Re-evaluate every already-seen batch with the current model.

    Returns (mean batch EM, forgotten ids, fixed ids, total example count),
    where "fixed" means an initially-wrong example now answered correctly and
    "forgotten" an initially-correct example now answered wrongly.
    """
    em_scores, forgotten_ids, fixed_ids = [], [], []
    total_len = 0
    seen_loaders = self.data_eval_loaders[:self.timecode]
    for loader in tqdm(seen_loaders, desc="Evaluate Accumulative Results"):
        _, results, results_all = self.evaluate(loader)
        em_scores.append(results["EM"])
        for (_, _, _id), em in zip(loader.data, results_all["EM"]):
            if em == 1 and _id in self.all_initial_error_ids:
                fixed_ids.append(_id)
            if em == 0 and _id in self.all_initial_pass_ids:
                forgotten_ids.append(_id)
            total_len += 1
    return float(np.mean(em_scores)), forgotten_ids, fixed_ids, total_len
def single_timecode_eval(self, timecode):
    """Used only for offline eval of a single checkpoint of a specific timecode.

    Runs the overall bug-fixing and forgetting evaluations, and — for
    non-offline methods — the current and next bug-batch evaluations.
    Returns a dict of `_pack_as_dict` records.
    """
    self.timecode = timecode
    result_dict = {}  # initialize for the given time code
    self.logger.info("Start the Overall Error-Fixing Results....")
    # Overall Error-Fixing Results
    eval_results_overall_bug = self.evaluate(
        self.bug_all_eval_loader, verbose=True)
    result_dict["eval_results_overall_bug"] = _pack_as_dict(
        *eval_results_overall_bug)
    self.logger.info("Start the Overall Error-Fixing Results....Done")
    self.logger.info(
        "Start the Overall Forgetting Results (Knowledge Retain Acc)....")
    # Overall Forgetting Results (Knowledge Retain Acc)
    eval_results_overall_forget = self.evaluate(
        self.forget_eval_loader, verbose=True)
    result_dict["eval_results_overall_forget"] = _pack_as_dict(
        *eval_results_overall_forget)
    self.logger.info(
        "Start the Overall Forgetting Results (Knowledge Retain Acc)....Done")
    if self.name == "offline_debug":
        # only overall evaluation for the offline debugging.
        return result_dict
    # Error-Fixing performance on the current batch of errors.
    if self.timecode > 0:
        self.logger.info(
            "Start Error-Fixing performance on the Current batch of errors.....")
        bug_eval_loader = self.bug_eval_loaders[self.timecode-1]
        eval_results_current_errors = self.evaluate(bug_eval_loader)
        result_dict["eval_results_current_errors"] = _pack_as_dict(
            *eval_results_current_errors)
        self.logger.info(
            "Start Error-Fixing performance on the Current batch of errors.....Done")
    # Error-Fixing performance on the next batch of errors. (for the computation of real responsive efr)
    if self.timecode < len(self.bug_eval_loaders):
        self.logger.info(
            "Start Error-Fixing performance on the Next batch of errors.....")
        bug_eval_loader = self.bug_eval_loaders[self.timecode]
        eval_results_next_errors = self.evaluate(bug_eval_loader)
        result_dict["eval_results_next_errors"] = _pack_as_dict(
            *eval_results_next_errors)
        self.logger.info(
            "Start Error-Fixing performance on the Next batch of errors.....Done")
    return result_dict
def load_data_static(self, data_args):
    """Load a pre-computed (static) bug stream plus the pass/upstream eval data.

    Builds per-batch train/eval loaders, an all-bugs loader, the
    `forget_eval_loader` for forgetting analysis, and (optionally) the
    formatted sampled-upstream examples.
    """
    self.data_args = data_args
    self._check_data_args()
    # Load bug stream
    with open(data_args.bug_stream_json_path) as f:
        bug_stream = json.load(f)
    self.bug_stream = bug_stream
    self.num_bug_batches = len(bug_stream)
    self.bug_batch_size = len(bug_stream[0])
    # Create data loaders for each error batch.
    all_formatted_bugs = []
    for bug_batch in tqdm(self.bug_stream, desc="Creating the bug data loaders."):
        formatted_bug_batch = self.data_formatter(bug_batch)
        all_formatted_bugs += formatted_bug_batch
        train_bug_dataloader, eval_bug_dataloader = self.get_dataloader(
            data_args, formatted_bug_batch, mode="both")
        self.bug_train_loaders.append(train_bug_dataloader)
        self.bug_eval_loaders.append(eval_bug_dataloader)
    assert len(self.bug_train_loaders) == self.num_bug_batches
    self.all_bug_examples = all_formatted_bugs
    # Create the all bug loaders.
    self.bug_all_train_loader, self.bug_all_eval_loader = self.get_dataloader(
        data_args, all_formatted_bugs, mode="both")
    # Create the pass pool evaluation loader for the final forgetting issue.
    if data_args.upstream_eval_data:
        # Create loaders for the sampled pass examples
        with open(data_args.upstream_eval_data) as f:
            pass_examples = [json.loads(line)
                             for line in set(f.read().splitlines())]
        self.sampled_passes = pass_examples
        pass_examples = self.data_formatter(pass_examples)
        _, self.forget_eval_loader = self.get_dataloader(
            data_args, pass_examples, mode="eval")
    if data_args.sampled_upstream_json_path:
        # Create loaders for the sampled pass examples
        with open(data_args.sampled_upstream_json_path) as f:
            sampled_upstream_examples = [json.loads(line)
                                         for line in set(f.read().splitlines())]
        self.sampled_upstream_examples = self.upstream_data_formatter(
            sampled_upstream_examples)
        # self.sampled_upstream_trainloader, self.sampled_upstream_evalloader = self.get_dataloader(
        #     data_args, sampled_upstream_examples, mode="eval")
    return
def online_debug_static(self):
    """For the static error stream.

    Iterates over pre-built bug batches, calling `fix_bugs` on each and
    checkpointing after every batch when `save_ckpt_freq` is set.
    """
    self.logger.info("Start Online Debugging with Static Error Mode")
    self.logger.info(f"Number of Batches of Bugs: {self.num_bug_batches}")
    self.logger.info(f"Bug Batch Size: {self.bug_batch_size}")
    self.timecode = 0
    if self.debugger_args.save_ckpt_freq:
        # save the initial model as the 0-th model.
        self._save_base_model()
    for bug_train_loader in tqdm(self.bug_train_loaders, desc="Online Debugging (Static)", total=self.num_bug_batches):
        ############### CORE ###############
        # Fix the bugs by mini-batch based "training"
        self.logger.info(f"Start bug-fixing .... Timecode: {self.timecode}")
        self.fix_bugs(bug_train_loader)  # for debugging
        self.logger.info("Start bug-fixing .... Done!")
        ############### CORE ###############
        self.timecode += 1
        if self.debugger_args.save_ckpt_freq:
            self._save_base_model()
            # Note that we save the model from the id=1.
# cmr/debug_algs/cl_mbcl_alg.py
def online_debug_static(self):
    """Static-stream online debugging with a memory module.

    Same loop as the plain static version, plus (a) sparse experience replay
    every `replay_frequency` model updates and (b) probabilistic storage of
    each trained batch into the memory module (`memory_store_rate`).
    """
    self.logger.info("Start Online Debugging")
    self.logger.info(f"Number of Batches of Bugs: {self.num_bug_batches}")
    self.logger.info(f"Bug Batch Size: {self.bug_batch_size}")
    self.logger.info(f"Replay Size: {self.debugger_args.replay_size}")
    self.logger.info(f"Replay Frequency: {self.debugger_args.replay_frequency}")
    self.timecode = 0
    if self.debugger_args.save_ckpt_freq:
        # save the initial model as the 0-th model.
        self._save_base_model()
    # For the initial memory.
    # TODO: sample and save to the memory.
    last_steps = 0
    for bug_train_loader in tqdm(self.bug_train_loaders, desc="Online Debugging", total=self.num_bug_batches):
        if (self.model_update_steps - last_steps) >= self.debugger_args.replay_frequency \
                and self.debugger_args.replay_frequency > 0 and self.debugger_args.replay_size > 0:
            # sparse experience replay
            self.logger.info("Triggering Sampling from Memory and starting to replay.")
            retrieved_examples = self.memroy_module.random_sample(
                sample_size=self.debugger_args.replay_size)
            replay_data_loader, _ = self.get_dataloader(
                self.data_args, retrieved_examples, mode="train")
            self.fix_bugs(replay_data_loader)  # sparse replay
            self.logger.info("Replay-Training done.")
            last_steps = self.model_update_steps
        ############### CORE START ###############
        # Fix the bugs by mini-batch based "training"
        self.logger.info(f"Start bug-fixing .... Timecode: {self.timecode}")
        self.fix_bugs(bug_train_loader)  # for debugging
        self.logger.info("Start bug-fixing .... Done!")
        ############### CORE END ###############
        self.timecode += 1
        if self.debugger_args.save_ckpt_freq:
            self._save_base_model()
            # Note that we save the model from the id=1.
            # So the 0-th checkpoint should be the original base model.
        # Store this batch into memory with probability `memory_store_rate`.
        _max = 1000000
        flag_store_examples = bool(random.randrange(0, _max)/_max >=
                                   1 - self.debugger_args.memory_store_rate)
        if flag_store_examples:
            self.logger.info("Saving examples to the memory.")
            key_vectors = self.memroy_module.encode_examples(bug_train_loader.data, use_random_keys=bool(self.name in ["er", "mir"]))
            self.memroy_module.store_examples(
                key_vectors, bug_train_loader.data, timecode=self.timecode)
            self.logger.info("Finished.")
    self.memroy_module.save_memory_to_path(self.debugger_args.memory_path)
|
CMR-main
|
cmr/debug_algs/_legacy_functions.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
from logging import disable
import numpy as np
import torch
from cmr.models.mybart import MyBart
from cmr.models import run_bart
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from cmr.task_manager.dataloader import GeneralDataset
from transformers import (AdamW, BartConfig, BartTokenizer,
get_linear_schedule_with_warmup)
from cmr.debug_algs.commons import OnlineDebuggingMethod
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
from torch import nn
import torch
from torch.nn import functional as F
import abc
import copy
class EWCRegularizer(nn.Module, metaclass=abc.ABCMeta):
'''Abstract module to add continual learning capabilities to a classifier.
'''
def __init__(self, ):
super().__init__()
self.base_model = None # the bart model or other possible models to
# -EWC:
# -> hyperparam: how strong to weigh EWC-loss ("regularisation strength")
self.ewc_lambda = 0
# -> hyperparam (online EWC): decay-term for old tasks' contribution to quadratic term
self.gamma = 1.
# -> "online" (=single quadratic term) or "offline" (=quadratic term per task) EWC
self.online = True
# -> sample size for estimating FI-matrix (if "None", full pass over dataset)
self.fisher_n = None
# -> if True, use provided labels to calculate FI ("empirical FI"); else predicted labels
self.emp_FI = True # otherwise we need to do the inference decoding.
# -> keeps track of number of quadratic loss terms (for "offline EWC")
self.EWC_task_count = 0
def estimate_fisher(self, data_loader, pad_token_id):
'''After completing training on a task, estimate diagonal of Fisher Information matrix.
[data_loader]: <DataSet> to be used to estimate FI-matrix; give batches of size 1
'''
# Prepare <dict> to store estimated Fisher Information matrix
est_fisher_info = {}
for n, p in self.base_model.named_parameters():
if p.requires_grad:
n = n.replace('.', '__')
est_fisher_info[n] = p.detach().clone().zero_()
# Set model to evaluation mode
mode = self.base_model.training
self.base_model.eval()
# Create data-loader to give batches of size 1
# data_loader = utils.get_data_loader(
# dataset, batch_size=1, cuda=self._is_on_cuda(), collate_fn=collate_fn)
# TODO: why batch size =1 ?
# Estimate the FI-matrix for [self.fisher_n] batches of size 1
for index, batch in enumerate(data_loader):
# break from for-loop if max number of samples has been reached
if self.fisher_n is not None:
if index >= self.fisher_n:
break
# run forward pass of model
# x = x.to(self.base_model._device())
batch = [b.to(torch.device("cuda")) for b in batch]
batch[0], batch[1] = trim_batch(
batch[0], pad_token_id, batch[1])
batch[2], batch[3] = trim_batch(
batch[2], pad_token_id, batch[3])
# output = self.base_model(x)
assert self.emp_FI
# -use provided label to calculate loglikelihood --> "empirical Fisher":
# label = torch.LongTensor([y]) if type(y) == int else y
# label = label.to(self.base_model._device())
# calculate negative log-likelihood
# negloglikelihood = F.nll_loss(F.log_softmax(output, dim=1), label)
nll_loss = self.base_model(input_ids=batch[0], attention_mask=batch[1],
decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
is_training=True)
# Calculate gradient of negative loglikelihood
self.base_model.zero_grad()
nll_loss.backward()
###
# Square gradients and keep running sum
for n, p in self.base_model.named_parameters():
if p.requires_grad:
n = n.replace('.', '__')
if p.grad is not None:
est_fisher_info[n] += p.grad.detach() ** 2
# Normalize by sample size used for estimation
est_fisher_info = {n: p/index for n, p in est_fisher_info.items()}
# Store new values in the network
for n, p in self.base_model.named_parameters():
if p.requires_grad:
n = n.replace('.', '__')
# -mode (=MAP parameter estimate)
self.register_buffer('{}_EWC_prev_task{}'.format(n, "" if self.online else self.EWC_task_count+1), p.detach().clone())
# -precision (approximated by diagonal Fisher Information matrix)
if self.online and self.EWC_task_count == 1:
existing_values = getattr(self, '{}_EWC_estimated_fisher'.format(n))
est_fisher_info[n] += self.gamma * existing_values
self.register_buffer('{}_EWC_estimated_fisher{}'.format(n, "" if self.online else self.EWC_task_count+1), est_fisher_info[n])
# If "offline EWC", increase task-count (for "online EWC", set it to 1 to indicate EWC-loss can be calculated)
self.EWC_task_count = 1 if self.online else self.EWC_task_count + 1
# Set model back to its initial mode
self.base_model.train(mode=mode)
def ewc_loss(self):
    '''Calculate the EWC regularization loss.

    Returns a scalar tensor: (1/2) * sum over all stored tasks and all
    trainable parameters of fisher * (p - mean)**2, where `mean` is the stored
    parameter snapshot and `fisher` its estimated importance. When
    self.gamma <= 0 this degenerates to a plain L2 distance to the snapshot.
    Returns 0 when no snapshot has been stored yet (EWC_task_count == 0).
    '''
    if self.EWC_task_count > 0:
        losses = []
        # If "offline EWC", loop over all previous tasks (if "online EWC",
        # [EWC_task_count]=1 so only 1 iteration).
        for task in range(1, self.EWC_task_count+1):
            for n, p in self.base_model.named_parameters():
                if p.requires_grad:
                    # Retrieve stored mode (MAP estimate) and precision
                    # (Fisher Information matrix). Buffer names use '__'
                    # in place of '.' (register_buffer forbids dots).
                    n = n.replace('.', '__')
                    mean = getattr(self, '{}_EWC_prev_task{}'.format(
                        n, "" if self.online else task))
                    if self.gamma > 0:
                        fisher = getattr(self, '{}_EWC_estimated_fisher{}'.format(
                            n, "" if self.online else task))
                        # If "online EWC", apply the decay-term to the running
                        # sum of the Fisher Information matrices.
                        fisher = self.gamma*fisher if self.online else fisher
                        # Calculate EWC-loss for this parameter.
                        losses.append((fisher * (p-mean)**2).sum())
                    else:
                        # Plain L2 norm w/o Fisher-information weighting.
                        losses.append(((p-mean)**2).sum())
        # Sum EWC-loss from all parameters (and from all tasks, if "offline EWC").
        return (1./2)*sum(losses)
    else:
        # EWC-loss is 0 if there are no stored mode and precision yet.
        # Bug fix: create the zero on the same device as the model parameters
        # instead of hard-coding CUDA, so CPU-only runs do not crash.
        try:
            device = next(self.base_model.parameters()).device
        except StopIteration:
            device = torch.device("cpu")
        return torch.tensor(0., device=device)
class OnlineEWC(ContinualFinetuning):
    """Online EWC continual-learning debugger.

    Fine-tunes the base model on each error batch with an added EWC penalty
    (via `self.regularizer`) and refreshes the Fisher information estimate
    after every episode.
    """

    def __init__(self, logger):
        super().__init__(logger=logger)
        self.name = "online_ewc"

    def _check_debugger_args(self):
        """Require the EWC-specific hyper-parameters on top of the base ones."""
        super()._check_debugger_args()
        required_atts = [
            # ewc-related hyper parameters
            "ewc_lambda",
            "ewc_gamma",
            # "use_sampled_upstream"
        ]
        assert all([hasattr(self.debugger_args, att) for att in required_atts])
        return

    def debugger_setup(self, debugger_args):
        """Base setup plus initialization of the online-EWC regularizer."""
        super().debugger_setup(debugger_args)
        # Initializing the EWC Regularizer (online variant with decay gamma).
        self.regularizer = EWCRegularizer()
        self.regularizer.online = True
        self.regularizer.ewc_lambda = self.debugger_args.ewc_lambda
        self.regularizer.gamma = self.debugger_args.ewc_gamma
        self.regularizer.emp_FI = True  # TODO: check later.
        self.regularizer.base_model = self.base_model
        return

    def fix_bugs(self, bug_loader, quiet=True):
        """Fix the current bug batch with EWC-regularized fine-tuning.

        After training, rebuilds a batch-size-1 loader from `bug_loader` and
        re-estimates the Fisher information on it.
        """
        self.base_model.train()
        train_losses = []
        global_step = 0
        pad_token_id = self.tokenizer.pad_token_id
        # NOTE(review): an initial Fisher estimate on sampled upstream data used
        # to be computed here for timecode 0; it is currently disabled.
        for epoch_id in range(int(self.debugger_args.num_epochs)):
            for batch in tqdm(bug_loader.dataloader, desc=f"Bug-fixing Epoch {epoch_id}", disable=quiet):
                # here the batch is a mini batch of the current bug batch
                global_step += 1
                if self.use_cuda:
                    batch = [b.to(torch.device("cuda")) for b in batch]
                batch[0], batch[1] = trim_batch(
                    batch[0], pad_token_id, batch[1])
                batch[2], batch[3] = trim_batch(
                    batch[2], pad_token_id, batch[3])
                # This is the task loss w/o any regularization.
                loss = self.base_model(input_ids=batch[0], attention_mask=batch[1],
                                       decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
                                       is_training=True)
                if self.n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if self.regularizer.ewc_lambda > 0:  # a hp to control the penalty weight.
                    # Add the regularization term.
                    ewc_loss = self.regularizer.ewc_loss()
                    loss = loss + self.regularizer.ewc_lambda * ewc_loss
                train_losses.append(loss.detach().cpu())
                loss.backward()
                self.model_update_steps += 1
                if global_step % self.debugger_args.gradient_accumulation_steps == 0:
                    torch.nn.utils.clip_grad_norm_(
                        self.base_model.parameters(), self.debugger_args.max_grad_norm)
                    self.optimizer.step()  # We have accumulated enough gradients
                    self.scheduler.step()
                    self.base_model.zero_grad()
        # Build a bsz=1 dataloader to update the Fisher information matrix.
        # Detach the logger before deep-copying (logger objects are not safely
        # deep-copyable), then restore it on BOTH the copy and the original
        # loader. (Bug fix: the original code left bug_loader.logger as None.)
        bug_loader.logger = None
        fisher_dataloader = copy.deepcopy(bug_loader)
        bug_loader.logger = self.logger
        fisher_dataloader.logger = self.logger
        fisher_dataloader.args.train_batch_size = 1
        fi_dl = fisher_dataloader.load_dataloader(do_return=True)
        self.regularizer.estimate_fisher(fi_dl, pad_token_id)
        return
|
CMR-main
|
cmr/debug_algs/cl_online_ewc_alg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
from logging import disable
from cmr.task_manager.eval_metrics import evaluate_func
from cmr.models.bart_with_adapater import BartWithAdapterConfig, MyBartWithAdapter
from cmr.debug_algs.cl_mbcl_alg import KeyValueMemoryModule
from cmr.models.hypernet import ParameterGenerator
import numpy as np
import torch
from cmr.models.mybart import MyBart
from cmr.models import run_bart
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from cmr.task_manager.dataloader import GeneralDataset
from transformers import (AdamW, BartConfig, BartTokenizer,
get_linear_schedule_with_warmup)
from cmr.debug_algs.commons import OnlineDebuggingMethod
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
from torch import log, nn
import torch
from torch.nn import functional as F
import transformers
class HyperBart(nn.Module):
    """BART whose per-layer adapter weights are produced by a hypernetwork.

    `weight_generator` (a ParameterGenerator) maps a task embedding to one
    flat parameter vector per transformer layer; `apply_adapter_weights`
    slices each vector into that layer's adapter matrices before the
    underlying `bart_model` (MyBartWithAdapter) runs.

    Required config attrs: encoder_layers, decoder_layers, d_model,
    adapter_dim, adapt_layer_norm, task_emb_dim, example_encoder_name
    (plus the generator's activation_function / generator_hdim).
    """

    def __init__(self, logger, config):
        super().__init__()
        self.logger = logger
        self.config = config
        self.bart_model = None        # MyBartWithAdapter; set by the trainer.
        self.weight_generator = None  # ParameterGenerator; set by the trainer.
        self.example_encoder, self.example_tokenizer = None, None

    def _apply_layer_adapter(self, p, layer):
        """Slice the flat parameter vector `p` into one layer's adapter weights.

        Layout of `p`: [down-weight | up-weight | down-bias | up-bias], with an
        optional trailing 2*d_model layer-norm delta when adapt_layer_norm is on.
        """
        d_model = self.config.d_model
        d_adapter = self.config.adapter_dim
        # dw, db: down-projection weight/bias; uw, ub: up-projection weight/bias.
        dw = p[0:d_model*d_adapter]
        uw = p[d_model*d_adapter:d_model*d_adapter*2]
        db = p[d_model*d_adapter*2:d_model*d_adapter*2+d_adapter]
        ub = p[d_model*d_adapter*2+d_adapter:d_model*d_adapter*2+d_adapter+d_model]
        layer.adapter_down_weight = dw.view(d_model, d_adapter)
        layer.adapter_down_bias = db.view(d_adapter)
        layer.adapter_up_weight = uw.view(d_adapter, d_model)
        layer.adapter_up_bias = ub.view(d_model)
        if self.config.adapt_layer_norm:
            # Additively shift the self-attention layer norm by the tail of `p`.
            layer.self_attn_layer_norm.weight.data = layer.self_attn_layer_norm.weight.data + p[-2*d_model: -1*d_model]
            layer.self_attn_layer_norm.bias.data = layer.self_attn_layer_norm.bias.data + p[-1*d_model:]

    def apply_adapter_weights(self, adapter_weights):
        """Install generated adapter weights into every encoder/decoder layer.

        `adapter_weights` holds encoder_layers + decoder_layers flat vectors,
        encoder layers first.
        """
        encoder_params = adapter_weights[:self.config.encoder_layers]
        decoder_params = adapter_weights[self.config.encoder_layers:]
        # The slicing logic is identical for both stacks, so share one helper.
        for p, encoder_layer in zip(encoder_params, self.bart_model.encoders()):
            self._apply_layer_adapter(p, encoder_layer)
        for p, decoder_layer in zip(decoder_params, self.bart_model.decoders()):
            self._apply_layer_adapter(p, decoder_layer)

    def forward(self, input_ids, attention_mask=None, encoder_outputs=None,
                decoder_input_ids=None, decoder_attention_mask=None, decoder_cached_states=None,
                use_cache=False, is_training=False, task_emb=None):
        """Overwrite bart.forward: generate and install adapter weights from
        `task_emb`, then delegate to the underlying BART model."""
        generated_weights = self.weight_generator(task_emb.unsqueeze(0))
        self.apply_adapter_weights(adapter_weights=generated_weights)
        ret = self.bart_model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, is_training=is_training, use_cache=use_cache)
        return ret

    def load_example_encoder(self):
        """Load the frozen example encoder/tokenizer used for task embeddings."""
        tmp = KeyValueMemoryModule(self.logger)
        self.example_encoder, self.example_tokenizer = tmp.load_key_encoder(memory_key_encoder=self.config.example_encoder_name)

    def get_task_embeddings(self, dataloader):
        """Encode every example in `dataloader` and return the mean vector as
        the task embedding."""
        examples = dataloader.data
        tmp = KeyValueMemoryModule(self.logger)
        tmp.tokenizer = self.example_tokenizer
        tmp.key_encoder = self.example_encoder
        all_vectors = tmp.encode_examples_for_caching(examples, return_tensors=True)
        all_vectors = torch.stack(all_vectors)
        mean_embedding = torch.mean(all_vectors, 0)
        return mean_embedding
class HyperCL(ContinualFinetuning):
    """Continual learning via a hypernetwork (HyperBart).

    Only the weight generator's decoders are trained; the underlying BART is
    frozen. Each episode is summarized by a task embedding that conditions the
    generated adapter weights.
    """

    def __init__(self, logger):
        super().__init__(logger=logger)
        self.name = "hyper_cl"

    def _check_debugger_args(self):
        super()._check_debugger_args()
        required_atts = ["adapter_dim", "example_encoder_name", "task_emb_dim"]
        assert all([hasattr(self.debugger_args, att) for att in required_atts])

    def _build_adapter_config(self, model_type, debugger_args):
        """Shared BartWithAdapterConfig construction (online debug + offline eval)."""
        config = BartWithAdapterConfig.from_pretrained(model_type)
        config.adapter_dim = debugger_args.adapter_dim
        config.adapt_layer_norm = False  # debugger_args.adapt_layer_norm
        # The dim of the CLS-token embedding of the example encoder below.
        config.task_emb_dim = debugger_args.task_emb_dim
        config.example_encoder_name = debugger_args.example_encoder_name
        return config

    def debugger_setup(self, debugger_args):
        """Build the HyperBart model, load pretrained BART weights, and set up
        the optimizer/scheduler over the hypernetwork parameters only."""
        self.debugger_args = debugger_args
        self._check_debugger_args()
        model_type, base_model_path = self.base_model_args.model_type, self.base_model_args.base_model_path
        config = self._build_adapter_config(model_type, debugger_args)
        # Set up the HyperBart model.
        self.base_model = HyperBart(self.logger, config)
        hyper_bart = self.base_model  # alias to indicate the special arch.
        hyper_bart.bart_model = MyBartWithAdapter(config)
        hyper_bart.weight_generator = ParameterGenerator(config)
        hyper_bart.load_example_encoder()
        # Load the bart model of the HyperBart model.
        self.logger.info(f"Loading checkpoint from {base_model_path} for {model_type} .....")
        mybart_model = MyBart.from_pretrained(model_type, state_dict=convert_model_to_single_gpu(torch.load(base_model_path)))
        hyper_bart.bart_model.model.load_state_dict(mybart_model.model.state_dict(), strict=False)
        # TODO: load the cache of both bart and the weight generator.
        if self.use_cuda:
            # Enable multi-gpu training.
            hyper_bart.to(torch.device("cuda"))
            self.logger.info("Moving to the GPUs.")
            if self.n_gpu > 1:
                hyper_bart = torch.nn.DataParallel(hyper_bart)
        hyper_bart = hyper_bart.module if self.n_gpu > 1 else hyper_bart
        # TODO: set up the memory for the "task embedding".
        self.stored_task_embs = None
        # Set up the optimizer. Note that we only update the hypernetwork.
        no_decay = ['bias', 'LayerNorm.weight']
        self.optimizer_grouped_parameters = [
            {'params': [p for n, p in hyper_bart.weight_generator.decoders.named_parameters() if not any(
                nd in n for nd in no_decay)], 'weight_decay': debugger_args.weight_decay},
            {'params': [p for n, p in hyper_bart.weight_generator.decoders.named_parameters() if any(
                nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        self.optimizer = AdamW(self.optimizer_grouped_parameters,
                               lr=debugger_args.learning_rate, eps=debugger_args.adam_epsilon)
        # TODO: double check the decision about warm up for fine-tuning.
        self.scheduler = get_linear_schedule_with_warmup(self.optimizer,
                                                         num_warmup_steps=debugger_args.warmup_steps,
                                                         num_training_steps=debugger_args.total_steps)
        self.logger.info(f"Debugger Setup ...... Done!")
        return

    def load_base_model(self, base_model_args, mode="online_debug"):
        """In offline-eval mode build an (empty) HyperBart shell; in online
        mode the model is created by debugger_setup instead."""
        self.base_model_args = base_model_args
        if mode == "offline_eval":
            model_type = self.base_model_args.model_type
            config = self._build_adapter_config(model_type, self.debugger_args)
            self.base_model = HyperBart(self.logger, config)
        else:
            pass  # the base_model is initiated in the debugger_setup
        return

    def fix_bugs(self, bug_loader, quiet=True):
        """Fine-tune the hypernetwork on the episode, conditioning on one task
        embedding computed once from the whole episode."""
        self.base_model.train()
        train_losses = []
        global_step = 0
        pad_token_id = self.tokenizer.pad_token_id
        hyper_bart = self.base_model  # alias
        task_emb = hyper_bart.get_task_embeddings(bug_loader)
        for epoch_id in range(int(self.debugger_args.num_epochs)):
            for batch in tqdm(bug_loader.dataloader, desc=f"Bug-fixing Epoch {epoch_id}", disable=quiet):
                global_step += 1
                # here the batch is a mini batch of the current bug batch
                if self.use_cuda:
                    batch = [b.to(torch.device("cuda")) for b in batch]
                batch[0], batch[1] = trim_batch(
                    batch[0], pad_token_id, batch[1])
                batch[2], batch[3] = trim_batch(
                    batch[2], pad_token_id, batch[3])
                loss = hyper_bart(task_emb=task_emb, input_ids=batch[0], attention_mask=batch[1],
                                  decoder_input_ids=batch[2], decoder_attention_mask=batch[3],
                                  is_training=True)
                if self.n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                train_losses.append(loss.detach().cpu())
                loss.backward()
                self.model_update_steps += 1
                if global_step % self.debugger_args.gradient_accumulation_steps == 0:
                    torch.nn.utils.clip_grad_norm_(
                        hyper_bart.parameters(), self.debugger_args.max_grad_norm)
                    self.optimizer.step()  # We have accumulated enough gradients
                    self.scheduler.step()
                    hyper_bart.zero_grad()
        return

    def get_task_split_for_inference(self):
        pass

    def evaluate(self, eval_dataloader=None, verbose=False):
        """Evaluates the performance of the (adapted) model on a loader."""
        if not eval_dataloader:
            eval_dataloader = self.submission_eval_loaders[self.timecode]
        # Prepare adapt_dataloaders for per-example adaptation at inference.
        adapt_dataloaders = self.get_adapt_dataloaders(eval_dataloader, verbose=True)
        predictions = self.base_model_infer_with_adaptation(eval_dataloader, adapt_dataloaders, verbose)
        assert len(predictions) == len(eval_dataloader)
        predictions = [p.strip() for p in predictions]
        results, return_all = evaluate_func(
            predictions, eval_dataloader.data, self.metric, return_all=True)
        return predictions, results, return_all
|
CMR-main
|
cmr/debug_algs/cl_hypernet_alg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
from datetime import time
from logging import disable
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
import numpy as np
import torch
from cmr.models.mybart import MyBart
from cmr.models import run_bart
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from cmr.task_manager.dataloader import GeneralDataset
from transformers import (AdamW, BartConfig, BartTokenizer,
get_linear_schedule_with_warmup)
from tqdm import tqdm
class NoneCL(ContinualFinetuning):
    """A no-op "debugger": the initial model stays frozen and is never updated."""

    def __init__(self, logger):
        super().__init__(logger=logger)
        self.name = "none_cl"

    def _check_debugger_args(self):
        """The frozen baseline requires no extra hyper-parameters."""
        return

    def debugger_setup(self, debugger_args):
        """Record the args; no optimizer or regularizer is built."""
        self.logger.info(f"No debugger!")
        self.debugger_args = debugger_args
        self._check_debugger_args()
        return

    def fix_bugs(self, bug_loader, quiet=True):
        """Intentionally do nothing — the frozen baseline never learns."""
        self.logger.info("No debugging at all.")
        return
class OfflineCL(NoneCL):
    """Frozen-model baseline evaluated over the dynamic error stream.

    Builds, per episode, an eval loader containing only the examples the
    initial model got wrong, then runs the standard online-evaluation loop
    without any model updates.
    """

    def __init__(self, logger):
        super().__init__(logger=logger)
        self.name = "none_cl_offline_eval"

    def _check_debugger_args(self):
        return

    def online_debug(self):
        """Run the (update-free) online evaluation over the data stream."""
        self.logger.info("Start Online Debugging with Dynamic Error Mode")
        self.logger.info(f"Number of Batches of Data: {self.num_data_batches}")
        self.logger.info(f"Data Batch Size: {self.data_batch_size};")
        self.timecode = 0
        data_args = self.data_args
        bug_eval_loaders = []
        for data_batch in tqdm(self.data_stream, desc="Creating the data loaders."):
            # Keep only the initial errors. (Bug fix: the original used `+=`,
            # which appended duplicated errors to the full batch instead of
            # filtering it down to the errors.)
            data_batch = [item for item in data_batch if item["init_status"] == "error"]
            formatted_data_batch = self.data_formatter(data_batch)
            _, eval_data_dataloader = self.get_dataloader(
                data_args, formatted_data_batch, mode="eval")
            bug_eval_loaders.append(eval_data_dataloader)
        for bug_eval_loader, data_eval_loader in tqdm(zip(bug_eval_loaders, self.data_eval_loaders), desc="Online Evaluation"):
            result_dict = {"timecode": self.timecode}  # start with 0
            if self.timecode+1 == len(self.data_eval_loaders):
                # Final episode: also measure knowledge retention/generalization.
                self.eval_knowledge_retention(result_dict)
                self.eval_knowledge_generalization(result_dict)
            # We don't need the returned dataloader here.
            _ = self._get_dynamic_errors(data_eval_loader, result_dict, return_raw_bug_examples=True)
            self.evaluate_error_fixing(result_dict, bug_eval_loader)
            self._update_result_dict(result_dict)
            self.logger.info("-"*50)
            self.timecode += 1
        #### Final evaluation ####
        self.final_evaluation()
        #### Save the final model ####
        self._save_base_model()
|
CMR-main
|
cmr/debug_algs/cl_none.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import json
import random
from cmr.benchmark_gen import sample_stream_data
from cmr.task_manager.eval_metrics import evaluate_func
def create_training_stream(args, logger):
    """Build the initial memory and a training stream from the upstream data.

    Evaluates the initial model M0's predictions on the upstream data, splits
    examples into errors ("bugs") and passes, then samples a stream of error
    episodes and an initial memory of passed examples.

    Returns:
        (sampled_init_memory, sampled_train_stream)
    """
    assert not args.use_dev_stream
    # Load the upstream ground-truth data: one JSON object per line with
    # "input" / "output" / "id" fields.
    upstream_truth_data = []
    with open(args.upstream_data_file) as fin:
        for line in fin:
            d = json.loads(line)
            upstream_truth_data.append((d["input"], d["output"], d["id"]))
    with open(args.upstream_data_prediction_file, "r") as f:
        M0_predictions = json.load(f)
    logger.info(f"len(predictions): {len(M0_predictions)}")
    # (Bug fix: removed a stray ']' inside the original log message.)
    logger.info(f"len(upstream_truth_data): {len(upstream_truth_data)}")
    results, results_all = evaluate_func(
        M0_predictions, upstream_truth_data, "EM|QA-F1", return_all=True)
    logger.info(f"Upstream evaluation results: {results}")
    # Split into M0's errors (bug_pool) and correct predictions (pass_pool).
    bug_pool, pass_pool = sample_stream_data.generate_bugs(
        M0_predictions, upstream_truth_data, results_all, f1_upper_bound=1.0)
    logger.info(f"len(bug_pool)={len(bug_pool)}")
    logger.info(f"len(pass_pool)={len(pass_pool)}")
    # TODO: add some pass_pool examples in bug pool?
    sampled_M0_errors = random.sample(
        bug_pool, args.train_stream_length * args.train_stream_episode_size)
    sampled_init_memory = random.sample(pass_pool, args.init_memory_size)
    # Randomly sorted bugs, grouped into fixed-size episodes.
    sampled_train_stream = sample_stream_data.get_data_stream(
        sampled_M0_errors, args.train_stream_episode_size, args.train_stream_length, use_score=False)
    return sampled_init_memory, sampled_train_stream
def create_training_stream_with_dev(args, logger):
    """Build the initial memory and training stream from the dev-split files.

    Returns:
        (sampled_init_memory, sampled_train_stream)
    """
    assert args.use_dev_stream
    # The dev memory file holds one JSON object per line.
    with open(args.dev_memory) as f:
        dev_memory = [json.loads(line) for line in f.read().splitlines()]
    sampled_init_memory = random.sample(dev_memory, args.init_memory_size)
    # The dev stream is a JSON list of example batches; flatten it.
    with open(args.dev_stream) as f:
        dev_stream = json.load(f)
    dev_stream_examples = [item for batch in dev_stream for item in batch]
    random.shuffle(dev_stream_examples)
    sampled_M0_errors = random.sample(
        dev_stream_examples, args.train_stream_length * args.train_stream_episode_size)
    # Randomly sorted bugs, grouped into fixed-size episodes.
    sampled_train_stream = sample_stream_data.get_data_stream(
        sampled_M0_errors, args.train_stream_episode_size, args.train_stream_length, use_score=False)
    return sampled_init_memory, sampled_train_stream
|
CMR-main
|
cmr/debug_algs/distant_supervision/ds_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
"""
This script is used to get the training data for learning a retriever that can get back the most forgettable examples given a batch of error cases to fix.
Input:
- The training streams. ---> get the error cases.
- model.
Output:
- The pairs between error cases and associated forgettable examples.
Key logic:
- Use the simple_CL method and put it work on the training streams (can be randomly sampled.)
- For each episode, before and after the error-fixing (continual fine-tuning) step, we record the forgetted the examples.
"""
import copy
import pickle
from cmr.debug_algs.cl_utils import _keep_first_answer
from cmr.debug_algs.distant_supervision.ds_utils import create_training_stream, create_training_stream_with_dev
from cmr.debug_algs.index_based.index_manager import RandomMemoryManger
from cmr.debug_algs.index_based.index_utils import get_bart_dual_representation
from cmr.models import run_bart
from cmr.models.utils import set_seeds, trim_batch
import torch
from scipy.stats.stats import describe
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
import random
from tqdm import tqdm
from cmr.debug_algs import run_lifelong_finetune
from cmr.benchmark_gen import sample_stream_data
import json
from cmr.task_manager.eval_metrics import evaluate_func
from collections import OrderedDict
from operator import getitem
class MiningSupervision(ContinualFinetuning):
    """Mine distant supervision: pairs of (error-query batch, most/least
    forgettable memory examples), measured by per-example loss deltas (MIR
    scores) before vs. after fixing each episode.
    """

    def __init__(self, logger):
        super().__init__(logger=logger)
        self.name = "simple_ds_mine"
        self.init_model = None  # frozen copy of the initial model M0

    def _check_data_args(self, additional_args):
        # The mining script manages its own data args; skip the base checks.
        pass

    def compute_MIR_scores(self, before_model, after_model, examples):
        """Return {example_id: after_loss - before_loss} for each example.

        A positive delta means the example got worse (was "forgotten") after
        the episode's update.
        """
        _examples = _keep_first_answer(examples)
        mlr_data_args = copy.deepcopy(self.data_args)
        mlr_data_args.predict_batch_size = 4  # TODO: set an arg.
        memory_buffer_loader, _ = self.get_dataloader(
            mlr_data_args, _examples, mode="train", is_training=False)  # fix of the order
        before_losses = run_bart.inference(
            before_model, memory_buffer_loader, compute_loss=True, loss_only=True, logger=self.logger)
        after_losses = run_bart.inference(
            after_model, memory_buffer_loader, compute_loss=True, loss_only=True, logger=self.logger)
        MIR_scores = {}
        for example, before_loss, after_loss in zip(examples, before_losses, after_losses):
            loss_delta = after_loss - before_loss
            MIR_scores[example[2]] = loss_delta  # id -> score
        return MIR_scores

    def get_pos_neg_results(self, examples, scores, positive_size=8, negative_size=8):
        """Top-k most forgotten (positive) and bottom-k least forgotten
        (negative) examples by MIR score."""
        examples_dict = {ex[2]: ex for ex in examples}
        sorted_scores = sorted(scores.items(), key=lambda x: x[1], reverse=True)
        pos_ids = [x[0] for x in sorted_scores[:positive_size]]
        neg_ids = [x[0] for x in sorted_scores[-negative_size:]]
        positive_results = [examples_dict[ex_id] for ex_id in pos_ids]
        negative_results = [examples_dict[ex_id] for ex_id in neg_ids]
        return positive_results, negative_results

    def wrap_supervision(self, before_model, after_model, query_examples, positive_results, negative_results):
        """Package one supervision instance: representations of the query
        examples (before/after the update) and of the positive/negative
        memory examples.

        Returns (supervision_dict, top_examples); top_examples is only filled
        in mean-reps mode.
        """
        cl_trainer = self
        tokenizer = self.tokenizer
        data_args = copy.deepcopy(self.data_args)
        data_args.predict_batch_size = 4
        if self.all_args.long_term_delta:
            # Optional: compute query vectors against the initial model M0
            # instead of the episode's pre-update model.
            self.logger.info("Using initial model as the before model for computing query vecs.")
            before_model = self.init_model
        supervision = {}
        supervision["mode"] = "all_hiddens" if self.all_args.save_all_hiddens else "mean_reps"
        supervision["positive"] = {}
        supervision["negative"] = {}
        top_examples = []
        if self.all_args.save_all_hiddens:
            supervision["query_before"] = {}
            supervision["query_after"] = {}
            query_hiddens_before = get_bart_dual_representation(cl_trainer, before_model, tokenizer, data_args, query_examples, return_all_hidden=True)
            query_hiddens_after = get_bart_dual_representation(cl_trainer, after_model, tokenizer, data_args, query_examples, return_all_hidden=True)
            positive_hiddens = get_bart_dual_representation(cl_trainer, self.init_model, tokenizer, data_args, positive_results, return_all_hidden=True)
            negative_hiddens = get_bart_dual_representation(cl_trainer, self.init_model, tokenizer, data_args, negative_results, return_all_hidden=True)
            for ind, example in enumerate(query_examples):
                supervision["query_before"][example[2]] = {k: v[ind] for k, v in query_hiddens_before.items()}
                supervision["query_after"][example[2]] = {k: v[ind] for k, v in query_hiddens_after.items()}
            for ind, example in enumerate(positive_results):
                supervision["positive"][example[2]] = {k: v[ind] for k, v in positive_hiddens.items()}
            # Bug fix: iterate the negative *examples* here. The original
            # enumerated `negative_hiddens` (the representation dict), so
            # `example[2]` indexed into dict keys instead of example tuples.
            for ind, example in enumerate(negative_results):
                supervision["negative"][example[2]] = {k: v[ind] for k, v in negative_hiddens.items()}
        else:
            supervision["query"] = {}
            query_vectors_before = get_bart_dual_representation(cl_trainer, before_model, tokenizer, data_args, query_examples)
            query_vectors_after = get_bart_dual_representation(cl_trainer, after_model, tokenizer, data_args, query_examples)
            assert len(query_vectors_before) == len(query_vectors_after) == len(query_examples)
            for example, q1, q2 in zip(query_examples, query_vectors_before, query_vectors_after):
                supervision["query"][example[2]] = list(q1) + list(q2)  # concat
            positive_vectors = get_bart_dual_representation(cl_trainer, self.init_model, tokenizer, data_args, positive_results)
            negative_vectors = get_bart_dual_representation(cl_trainer, self.init_model, tokenizer, data_args, negative_results)
            for example, vector in zip(positive_results, positive_vectors):
                supervision["positive"][example[2]] = list(vector)
                top_examples.append(example)
            for example, vector in zip(negative_results, negative_vectors):
                supervision["negative"][example[2]] = list(vector)
        return supervision, top_examples

    def mine_supervision(self, memory_manager=None, all_args=None):
        """Run the mining loop over the error stream and return the list of
        mined supervision instances (one per episode)."""
        self.all_args = all_args
        self.logger.info("Start Mining Distant Supervision (as online debugging).")
        sub_stream_dataloaders = self.data_eval_loaders
        self.logger.info(f"Number of Batches of Data: {len(sub_stream_dataloaders)}")
        self.logger.info(f"Data Batch Size: {self.data_batch_size};")
        self.timecode = 0
        mined_supervision = []
        for data_eval_loader in tqdm(sub_stream_dataloaders, desc="Mining Supervision from Dynamic Error Stream"):
            episode_data = data_eval_loader.data
            bug_train_loader, _ = self.get_dataloader(
                self.data_args, episode_data, mode="train")
            # TODO: this is actually not errors for M_t, it is just M_0's errors
            model_copy = copy.deepcopy(self.base_model)
            ############### CORE ###############
            # Fix the bugs by mini-batch based "training".
            self.logger.info(f"Start error-fixing .... Timecode: {self.timecode}")
            self.fix_bugs(bug_train_loader)  # for debugging
            self.logger.info("Start error-fixing .... Done!")
            ############### CORE ###############
            updated_model = self.base_model
            sampled_examples = memory_manager.retrieve_from_memory(sample_size=all_args.mir_buffer_size)
            MIR_scores = self.compute_MIR_scores(model_copy, updated_model, sampled_examples)
            self.timecode += 1
            positive_results, negative_results = self.get_pos_neg_results(
                sampled_examples, MIR_scores,
                positive_size=all_args.positive_size, negative_size=all_args.negative_size)
            supervision, top_examples = self.wrap_supervision(model_copy, updated_model, episode_data, positive_results, negative_results)
            self.logger.info(f"Get an instance for supervision at {self.timecode}")
            mined_supervision.append(supervision)
            memory_manager.store_examples(episode_data)
            # Roll back to the pre-episode model, then retrain on the episode
            # data mixed with the top (most forgettable) examples.
            self.base_model = model_copy
            self.reset_optimizer()
            mixed_data = episode_data + top_examples
            mixed_bug_train_loader, _ = self.get_dataloader(
                self.data_args, mixed_data, mode="train")
            self.fix_bugs(mixed_bug_train_loader)  # for debugging
        return mined_supervision
if __name__ == '__main__':
    # CLI driver: extends the shared lifelong-finetuning parser with the
    # mining-specific arguments, then runs `num_rounds` rounds of supervision
    # mining, pickling the results of each round.
    parser = run_lifelong_finetune.get_cli_parser()
    parser.add_argument("--upstream_data_file", type=str,
                        default="data/mrqa_naturalquestions/mrqa_naturalquestions_train.jsonl",
                        help="the path to upstream data")
    parser.add_argument("--upstream_data_prediction_file", type=str,  # by the initial model M_0
                        default="bug_data/mrqa_naturalquestions_train.predictions.jsonl",
                        help="the path to initial model's predictions on the upstream data")
    parser.add_argument("--dev_memory", type=str,  # by the initial model M_0
                        default="exp_results/data_streams/mrqa.nq_train.memory.jsonl",
                        help="the path to initial model's predictions on the upstream data")
    parser.add_argument("--dev_stream", type=str,  # by the initial model M_0
                        default="exp_results/data_streams/mrqa.mixed.data_stream.test.json",
                        help="the path to initial model's predictions on the upstream data")
    parser.add_argument("--output_supervision", type=str,
                        help="the path to save the thread results")
    # Stream / memory sizing.
    parser.add_argument('--train_stream_length', type=int, default=100)
    parser.add_argument('--train_stream_episode_size', type=int, default=16)
    parser.add_argument('--init_memory_size', type=int, default=10000)
    parser.add_argument('--num_rounds', type=int, default=1)
    parser.add_argument('--positive_size', type=int, default=8)
    parser.add_argument('--negative_size', type=int, default=8)
    parser.add_argument('--mir_buffer_size', type=int, default=256)
    # Boolean flags parsed from strings like "true"/"1"/"yes".
    parser.add_argument('--use_dev_stream', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
    parser.add_argument('--long_term_delta', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
    parser.add_argument('--save_all_hiddens', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
    parser.add_argument('--debug_mode', default=True, type=lambda x: (str(x).lower() in ['true','1', 'yes']))
    args = parser.parse_args()
    # debugging: force the mining method and its debug-mode settings
    args.cl_method_name = "simple_ds_mine"
    if args.debug_mode:
        args.use_dev_stream = True
        args.long_term_delta = True
    assert args.cl_method_name == "simple_ds_mine"
    ## init the useful args ##
    cl_supervision_miner, data_args, base_model_args, debugger_args, logger = run_lifelong_finetune.setup_args(
        args)
    setattr(data_args, "replay_stream_json_path", "")
    ## Init the cl_supervision_miner
    cl_supervision_miner.load_base_model(base_model_args)
    cl_supervision_miner.init_model = copy.deepcopy(cl_supervision_miner.base_model)  # maintain M0
    ## Create Training Stream ##
    for _rid in range(args.num_rounds):
        logger.info(f"Starting Round {_rid} ....")
        # Per-thread seeding: `args.seed` indexes into a shuffled list of seeds.
        seeds = list(range(100000))
        random.shuffle(seeds)
        selected_seed = seeds[args.seed]  # actually the index
        logger.info(f"Active Seed = {selected_seed}")
        set_seeds(selected_seed)
        if not args.use_dev_stream:
            initial_memory, sampled_train_stream = create_training_stream(args, logger)
        else:
            initial_memory, sampled_train_stream = create_training_stream_with_dev(args, logger)
        ## Init the RandomMemroy module ##
        memory_manager = RandomMemoryManger(logger)  # TODO: try the BART-base one?
        formatted_initial_memory = cl_supervision_miner.data_formatter(initial_memory)
        memory_manager.set_up_initial_memory(formatted_examples=formatted_initial_memory)
        logger.info(f"Initial memory size: {memory_manager.get_memory_size()}")
        cl_supervision_miner.load_data(data_args, given_data_stream=sampled_train_stream)
        cl_supervision_miner.debugger_setup(debugger_args)
        mined_supervision = cl_supervision_miner.mine_supervision(memory_manager, all_args=args)
        # Each round writes its own pickle: foo.pkl -> foo-<round>.pkl.
        path_to_save = args.output_supervision.replace(".pkl", f"-{_rid}.pkl")
        with open(path_to_save, "wb") as f:
            logger.info(f"Saving {f.name}")
            pickle.dump(mined_supervision, f)
            logger.info(f"Saving {f.name}...Done!")
        logger.info(f"Finished Round {_rid} !")
"""
# debug
index=0
gpu=0
prefix=data_collection_simple_${thread}
log_file=exp_results/supervision_data/logs/run_${prefix}.log
CUDA_VISIBLE_DEVICES=${gpu} python cmr/debug_algs/distant_supervision/data_collection.py \
--cl_method_name simple_ds_mine \
--seed ${thread} \
--output_supervision "exp_results/supervision_data/simple_mir_dm/dm.${thread}.pkl" \
--learning_rate 3e-5 --num_train_epochs 5 --train_batch_size 10 \
--prefix ${prefix} \
--stream_mode dynamic \
--replay_stream_json_path "" \
--upstream_eval_data exp_results/data_streams/mrqa_naturalquestions_dev.hidden_passes.jsonl \
--save_ckpt_freq 0
> ${log_file} 2>&1
&
echo $log_file
"""
|
CMR-main
|
cmr/debug_algs/distant_supervision/data_collection.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import torch
from tqdm import tqdm
from transformers.modeling_bart import _prepare_bart_decoder_inputs
from transformers.tokenization_utils import trim_batch
import numpy as np
from cmr.debug_algs.cl_utils import _keep_first_answer
def masked_mean(reps, masks):
    """Average token representations over the sequence axis, ignoring padding.

    Args:
        reps: float tensor of shape (batch, seq_len, hidden).
        masks: attention mask reshapeable to (batch, seq_len); 1 marks real
            tokens and 0 marks padding.

    Returns:
        Tensor of shape (batch, hidden): per-example mean over unmasked
        positions.
    """
    batch_size, seq_len = reps.size()[0], reps.size()[1]
    # Reshape the mask so it broadcasts over the hidden dimension.
    weights = masks.view(batch_size, seq_len, 1)
    # Zero out padded positions, sum over the sequence axis, then divide by
    # the number of real tokens in each example.
    summed = (reps * weights).sum(dim=1)
    token_counts = weights.sum(dim=1).view(summed.size()[0], 1)
    return summed / token_counts
def get_bart_dual_representation(cl_trainer, bart_model, tokenizer, data_args, examples, return_all_hidden=False, agg_method="mean"):
    """Encode (input, output) example pairs with a BART model into vectors.

    Each example's input text is run through the BART encoder and its output
    text through the BART decoder; each side is aggregated into a single
    vector (masked mean over tokens, or the first token), and the two sides
    are concatenated into one vector per example.

    Args:
        cl_trainer: trainer object providing ``get_dataloader``, ``n_gpu``
            and ``use_cuda`` (e.g. a ContinualFinetuning instance).
        bart_model: the (possibly DataParallel-wrapped) BART model.
        tokenizer: tokenizer providing ``pad_token_id`` for batch trimming.
        data_args: dataloader configuration forwarded to ``get_dataloader``.
        examples: formatted examples; only the first answer of each is kept.
        return_all_hidden: if True, return per-token hidden states and masks
            instead of the aggregated vectors.
        agg_method: "mean" (masked mean over tokens) or "first" (first token).

    Returns:
        A list of numpy vectors (one per example; input- and output-side
        vectors concatenated), or — when ``return_all_hidden`` is True — a
        dict of per-token input/output representations and attention masks.
    """
    # Multiple answers would otherwise expand into multiple rows per example.
    examples_with_single_ans = _keep_first_answer(examples)
    data_manager, _ = cl_trainer.get_dataloader(data_args,
                                                examples_with_single_ans,
                                                mode="train",
                                                is_training=False)
    all_vectors = []
    # Unwrap DataParallel (if any) so .model.encoder/.model.decoder resolve.
    bart_model = bart_model if cl_trainer.n_gpu == 1 else bart_model.module
    bart_model.eval()
    all_hiddens = {"input_reps":[], "input_masks": [], "output_reps": [] , "output_masks": []}
    for batch in tqdm(data_manager.dataloader, desc="Computing BART representation"):
        # self.logger.info(f"len(batch)={len(batch)}")
        if cl_trainer.use_cuda:
            # print(type(batch[0]), batch[0])
            batch = [b.to(torch.device("cuda")) for b in batch]
        pad_token_id = tokenizer.pad_token_id
        # Drop all-padding columns to shrink the forward passes.
        batch[0], batch[1] = trim_batch(
            batch[0], pad_token_id, batch[1])
        batch[2], batch[3] = trim_batch(
            batch[2], pad_token_id, batch[3])
        # Encode the input text with BART-encoder
        input_ids = batch[0]
        input_attention_mask = batch[1]
        encoder_outputs = bart_model.model.encoder(
            input_ids, input_attention_mask)
        x = encoder_outputs[0]
        # Aggregate token states into one vector per example.
        if agg_method == "mean":
            x = masked_mean(x, input_attention_mask)  # use the mean instead of the first
        elif agg_method == "first":
            x = x[:, 0, :]
        input_vectors = x.detach().cpu().numpy()
        # self.logger.info(f"input_vectors.shape = {input_vectors.shape}")
        # Encode the output text with BART-decoder
        output_ids = batch[2]
        output_attention_mask = batch[3]
        decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_bart_decoder_inputs(
            bart_model.model.config,
            input_ids,
            decoder_input_ids=output_ids,
            decoder_padding_mask=output_attention_mask,
            causal_mask_dtype=bart_model.model.shared.weight.dtype,
        )
        decoder_outputs = bart_model.model.decoder(
            decoder_input_ids,
            encoder_outputs[0],
            input_attention_mask,
            decoder_padding_mask,
            decoder_causal_mask=causal_mask,
            decoder_cached_states=None,
            use_cache=False
        )
        y = decoder_outputs[0]
        if agg_method == "mean":
            y = masked_mean(y, output_attention_mask)  # use the mean instead of the first
        elif agg_method == "first":
            y = y[:, 0, :]
        output_vectors = y.detach().cpu().numpy()
        # self.logger.info(f"output_vectors.shape = {output_vectors.shape}")
        # concatenate the input-side and output-side vectors
        vectors = np.concatenate([input_vectors, output_vectors], axis=1)
        if return_all_hidden:
            all_hiddens["input_reps"] += list(encoder_outputs[0].detach().cpu().numpy())
            all_hiddens["output_reps"] += list(decoder_outputs[0].detach().cpu().numpy())
            all_hiddens["input_masks"] += list(input_attention_mask.detach().cpu().numpy())
            all_hiddens["output_masks"] += list(output_attention_mask.detach().cpu().numpy())
        # self.logger.info(f"vectors.shape = {vectors.shape}")
        all_vectors += list(vectors)
        # Drop references promptly so GPU memory is freed between batches.
        del batch
        del encoder_outputs
        del decoder_outputs
    if return_all_hidden:
        return all_hiddens
    else:
        return all_vectors
|
CMR-main
|
cmr/debug_algs/index_based/index_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
|
CMR-main
|
cmr/debug_algs/index_based/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from cmr.debug_algs.cl_utils import get_top_interfered_examples, get_virtual_updated_model
from cmr.debug_algs.index_based.IO_each_index import BartIOIndexManager
from cmr.debug_algs.index_based.biencoder import BiEncoderIndexManager
from cmr.debug_algs.index_based.index_manager import BartIndexManager, RandomMemoryManger
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
import random
import numpy as np
import torch
import transformers
from cmr.task_manager.eval_metrics import evaluate_func
import copy
import pickle
import os
from cmr.models.mybart import MyBart
from cmr.models import run_bart
from cmr.models.utils import (convert_model_to_single_gpu,
freeze_embeds, trim_batch)
from argparse import Namespace
import more_itertools
import json
class IndexBasedCL(ContinualFinetuning):
    """Continual-learning debugger that replays examples retrieved from an
    index-based memory (BART encoder index, BART input/output index, or a
    trained bi-encoder), optionally re-ranked with MIR.

    NOTE: the attribute names ``memroy_module`` / ``upstream_memroy_module``
    contain a historical typo that is kept for compatibility with the rest of
    the codebase.
    """

    def __init__(self, logger):
        super().__init__(logger=logger)
        # The concrete name is filled in by ``debugger_setup`` once the
        # indexing method is known.
        self.name = "tbd"

    def _check_debugger_args(self):
        """Validate that all replay/index-specific hyper-parameters exist."""
        super()._check_debugger_args()
        required_atts = [
            "replay_size",
            "replay_candidate_size",
            "replay_frequency",
            "memory_store_rate",  # 0, 0.1, 1 etc.
            "upstream_sample_ratio",
            "memory_path",  # to save the memory module to disk
            "use_replay_mix",
            "init_memory_cache_path",
            "index_rank_method"
        ]
        assert all([hasattr(self.debugger_args, att) for att in required_atts])
        # assert self.debugger_args.index_rank_method in ["most_similar", "most_different"]

    def debugger_setup(self, debugger_args):
        """Set up the base trainer plus the index-based memory module(s).

        Depending on ``upstream_sample_ratio``, either one shared memory
        module serves both upstream and online examples (< 0), or two
        separate modules are created (>= 0).
        """
        super().debugger_setup(debugger_args)
        self.memroy_module = None  # online memory, if used separately
        self.upstream_memroy_module = None  # memory over upstream data

        def setup_bart_index():
            # Index over BART dual (encoder+decoder) representations.
            mm = BartIndexManager(self.logger)
            mm.set_up_data_args(self.data_args)
            mm.data_args.predict_batch_size = 4
            mm.load_encoder_model(self.base_model_args)
            return mm

        def setup_bart_io_index():
            # Separate indexes for the input-side and output-side vectors.
            mm = BartIOIndexManager(self.logger)
            mm.set_up_data_args(self.data_args)
            mm.data_args.predict_batch_size = 4
            mm.load_encoder_model(self.base_model_args)
            return mm

        def setup_biencoder():
            # Trained bi-encoder with separate memory/query encoders; its
            # training args and checkpoint paths come from a JSON file.
            with open(debugger_args.indexing_args_path) as f:
                train_args_dict = json.load(f)
            mm = BiEncoderIndexManager(self.logger)
            mm.train_args = Namespace(**train_args_dict)
            mm.set_up_data_args(self.data_args)
            mm.data_args.predict_batch_size = 4
            mm.load_encoder_model(
                self.base_model_args,
                mm.train_args.memory_encoder_path,
                mm.train_args.query_encoder_path)
            return mm

        # Initializing the upstream index manager.
        self.logger.info(f"indexing_method={debugger_args.indexing_method}")
        self.name = f"index_cl_{debugger_args.indexing_method}"
        if debugger_args.indexing_method == "bart_index":
            self.logger.info("setup_bart_index")
            self.upstream_memroy_module = setup_bart_index()
        elif debugger_args.indexing_method == "bart_io_index":
            self.logger.info("bart_io_index")
            self.upstream_memroy_module = setup_bart_io_index()
        elif debugger_args.indexing_method == "biencoder":
            self.logger.info("biencoder")
            self.upstream_memroy_module = setup_biencoder()
        assert self.upstream_memroy_module is not None

        # Either warm-start the upstream memory from a cached pickle, or
        # build it from the sampled upstream examples.
        if debugger_args.init_memory_cache_path:
            self.upstream_memroy_module.load_memory_from_path(debugger_args.init_memory_cache_path)
        else:
            self.upstream_memroy_module.set_up_initial_memory(
                formatted_examples=self.sampled_upstream_examples)

        if self.debugger_args.upstream_sample_ratio < 0:
            # Single shared memory module for upstream + online examples.
            self.logger.info("upstream_sample_ratio < 0 ; self.memroy_module <---> self.upstream_memroy_module")
            self.memroy_module = self.upstream_memroy_module
        else:
            # Two separate modules; the online one starts empty.
            self.logger.info("upstream_sample_ratio > 0 ; two seperate memory module")
            if debugger_args.indexing_method == "bart_io_index":
                self.memroy_module = setup_bart_io_index()
            elif debugger_args.indexing_method == "biencoder":
                self.memroy_module = setup_biencoder()
            elif debugger_args.indexing_method == "bart_index":
                self.memroy_module = setup_bart_index()
        return

    def online_debug(self):
        """Main online-debugging loop over the dynamic error stream.

        Per incoming batch: evaluate knowledge retention/generalization,
        collect the current model's errors, periodically retrieve replay
        examples from the memory module(s) (optionally MIR re-ranked), train
        on the errors (mixed with replay or in a separate replay step), store
        the new errors into memory, then evaluate and checkpoint.
        """
        self.logger.info("Start Online Debugging with Dynamic Error Mode")
        self.logger.info(f"Number of Batches of Data: {self.num_data_batches}")
        self.logger.info(f"Data Batch Size: {self.data_batch_size};")
        self.timecode = 0

        if self.debugger_args.save_ckpt_freq > 0 and self.timecode % self.debugger_args.save_ckpt_freq == 0:
            # save the initial model as the 0-th model.
            self._save_base_model()

        self.past_errors = []
        self.past_submission = []
        last_steps = 0

        self.logger.info("Copying initial model")
        initial_model = copy.deepcopy(self.base_model)  # for the use of query

        for data_eval_loader in tqdm(self.data_eval_loaders, desc="Online Debugging (with Index-based replay)"):

            result_dict = {"timecode": self.timecode}  # start with 0

            self.eval_knowledge_retention(result_dict)
            self.eval_knowledge_generalization(result_dict)

            ############### CORE ###############
            # self._replay_based_eval(result_dict)
            formatted_bug_examples = self._get_dynamic_errors(
                data_eval_loader, result_dict, return_raw_bug_examples=True)
            _, bug_eval_loader = self.get_dataloader(self.data_args, formatted_bug_batch=formatted_bug_examples, mode="eval")

            examples_to_train = formatted_bug_examples[:]

            # Sparse replay is triggered every `replay_frequency` steps
            # (skipping timecode 0).
            # if (self.model_update_steps - last_steps) >= self.debugger_args.replay_frequency \
            if self.timecode % self.debugger_args.replay_frequency == 0 \
                    and self.debugger_args.replay_frequency > 0 and self.debugger_args.replay_size > 0 \
                    and self.timecode > 0:
                # sparse experience replay
                self.logger.info("Triggering Sampling from Memory and starting to replay.")
                self.logger.info(f"Current memroy_module size: {self.memroy_module.get_memory_size()}.")
                if self.upstream_memroy_module:
                    self.logger.info(f"Current upstream_memroy_module size: {self.upstream_memroy_module.get_memory_size()}.")
                if self.debugger_args.indexing_method == "biencoder":
                    # self.memroy_module.before_model = initial_model # if for longer-delta
                    self.upstream_memroy_module.before_model = initial_model
                    # NOTE(review): ``bug_train_loader`` is only assigned near
                    # the *end* of this loop body, so at this point it holds
                    # the previous iteration's loader — confirm intended.
                    self.upstream_memroy_module.after_model = get_virtual_updated_model(self, bug_train_loader)
                elif self.debugger_args.indexing_method == "bart_io_index":
                    # self.upstream_memroy_module.bart_model = initial_model
                    if self.debugger_args.upstream_sample_ratio > 0:  # a seperate online memory module
                        self.memroy_module.bart_model = self.base_model
                elif self.debugger_args.indexing_method == "bart_index":
                    # self.upstream_memroy_module.bart_model = initial_model
                    if self.debugger_args.upstream_sample_ratio > 0:  # a seperate online memory module
                        self.memroy_module.bart_model = self.base_model

                if self.debugger_args.use_mir:
                    assert self.debugger_args.replay_candidate_size >= self.debugger_args.replay_size

                    def mir_retrieve(mm, sample_size):
                        # Retrieve a larger candidate pool from ``mm`` and keep
                        # the top-`sample_size` most-interfered examples (MIR).
                        effective_cand_size = min(self.debugger_args.replay_candidate_size, mm.get_memory_size())
                        self.logger.info(f"effective_cand_size={effective_cand_size}")
                        # Per-query quota (with 10% head-room) so the union of
                        # per-query retrievals roughly fills the candidate pool.
                        each_sample_size = int(effective_cand_size*1.1/sample_size)
                        self.logger.info(f"each_sample_size={each_sample_size}")
                        assert effective_cand_size >= self.debugger_args.replay_size
                        retrieved_examples_candidates = mm.retrieve_from_memory(
                            query_examples=formatted_bug_examples,
                            sample_size=effective_cand_size,
                            rank_method=self.debugger_args.index_rank_method,
                            agg_method="each_topk_then_random",
                            each_sample_size=each_sample_size,
                            each_sim_sample_size=min(each_sample_size*5, mm.get_memory_size()),  # only used for the bart-IO
                        )
                        if "mir_buffer_ids" not in result_dict:
                            result_dict["mir_buffer_ids"] = []
                        result_dict["mir_buffer_ids"] += [_id for (_input, _truth, _id) in retrieved_examples_candidates]
                        retrieved_examples = get_top_interfered_examples(self,
                            K=sample_size, candidate_examples=retrieved_examples_candidates, query_data_loader=bug_train_loader)
                        return retrieved_examples

                    if self.debugger_args.upstream_sample_ratio > 0:
                        # Split the replay budget between the two modules.
                        upstream_sample_budget = int(self.debugger_args.upstream_sample_ratio * self.debugger_args.replay_size)
                        self.logger.info(f"Memory from upstream_memroy_module = {upstream_sample_budget}; ")
                        self.logger.info(f"Memory from memroy_module = {self.debugger_args.replay_size-upstream_sample_budget}; ")
                        retrieved_examples = []
                        if upstream_sample_budget > 0:
                            retrieved_examples += mir_retrieve(mm=self.upstream_memroy_module,
                                sample_size=upstream_sample_budget)
                        retrieved_examples += mir_retrieve(mm=self.memroy_module,
                            sample_size=self.debugger_args.replay_size-upstream_sample_budget)
                    else:
                        retrieved_examples = mir_retrieve(mm=self.memroy_module, sample_size=self.debugger_args.replay_size)
                else:
                    # Plain index-based retrieval (no MIR re-ranking).
                    each_sample_size = 5
                    each_sim_sample_size = 30
                    retrieved_examples = []
                    upstream_sample_budget = 0
                    if self.debugger_args.upstream_sample_ratio > 0:
                        upstream_sample_budget = int(self.debugger_args.upstream_sample_ratio * self.debugger_args.replay_size)
                        self.logger.info(f"Memory from upstream_memroy_module = {upstream_sample_budget}; ")
                        self.logger.info(f"Memory from memroy_module = {self.debugger_args.replay_size-upstream_sample_budget}; ")
                        retrieved_examples += self.upstream_memroy_module.retrieve_from_memory(
                            query_examples=formatted_bug_examples,
                            sample_size=upstream_sample_budget,
                            agg_method="each_topk_then_random",
                            rank_method=self.debugger_args.index_rank_method,
                            each_sample_size=each_sample_size, each_sim_sample_size=each_sim_sample_size)
                    retrieved_examples += self.memroy_module.retrieve_from_memory(
                        query_examples=formatted_bug_examples,
                        sample_size=self.debugger_args.replay_size-upstream_sample_budget,
                        agg_method="each_topk_then_random",
                        rank_method=self.debugger_args.index_rank_method,
                        each_sample_size=each_sample_size, each_sim_sample_size=each_sample_size*5)

                # self.logger.info(f"retrieved_examples (index)={retrieved_examples}")
                result_dict["retrieved_ids"] = [_id for (_input, _truth, _id) in retrieved_examples]
                if self.debugger_args.use_replay_mix:
                    # Mix the replay examples into the current error batch.
                    examples_to_train += retrieved_examples
                    self.logger.info(
                        f"Mixed the retrieved examples (len={len(retrieved_examples)}) to the current batch for training.")
                else:
                    # Train on the replay examples as a separate step.
                    self.logger.info(
                        f"Replay-Training Start! Using the retrieved examples (len={len(retrieved_examples)}) ")
                    replay_data_loader, _ = self.get_dataloader(
                        self.data_args, retrieved_examples, mode="train")
                    self.fix_bugs(replay_data_loader, quiet=False)  # sparse replay
                    self.logger.info("Replay-Training done.")

            last_steps = self.model_update_steps

            # Fix the bugs by mini-batch based "training"
            self.logger.info(
                f"Start error-fixing (len(examples_to_train)={len(examples_to_train)}) .... Timecode: {self.timecode}")
            bug_train_loader, _ = self.get_dataloader(
                self.data_args, examples_to_train, mode="train")
            self.fix_bugs(bug_train_loader)  # for debugging
            self.logger.info("Start error-fixing .... Done!")

            flag_store_examples = True
            if flag_store_examples:
                # Store this round's errors into the online memory.
                self.logger.info(
                    f"Saving the current error examples (len={len(formatted_bug_examples)}) to the memory.")
                self.logger.info(f"Current memroy_module size: {self.memroy_module.get_memory_size()}.")
                if self.upstream_memroy_module:
                    self.logger.info(f"Current upstream_memroy_module size: {self.upstream_memroy_module.get_memory_size()}.")
                self.memroy_module.store_examples(formatted_bug_examples)
                self.logger.info("Finished.")
            ############### CORE ###############

            self.evaluate_error_fixing(result_dict, bug_eval_loader)
            self._update_result_dict(result_dict)

            if self.debugger_args.save_ckpt_freq > 0 and self.timecode % self.debugger_args.save_ckpt_freq == 0:
                self._save_base_model()
                self.save_result_file()
            self.logger.info("-"*50)
            self.timecode += 1

        #### Final evaluation ####
        self.final_evaluation()

        #### Save the final model ####
        self._save_base_model()

        # Persist the memory module for later inspection/reuse.
        self.memroy_module.save_memory_to_path(self.debugger_args.memory_path)
|
CMR-main
|
cmr/debug_algs/index_based/cl_indexed_alg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
from cmr.debug_algs.cl_utils import _keep_first_answer
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
import torch
from cmr.debug_algs.index_based.index_utils import get_bart_dual_representation
from cmr.models.utils import trim_batch
import json
from transformers.modeling_bart import _prepare_bart_decoder_inputs
import numpy as np
import faiss
import pickle
import random
class BaseMemoryManager():
    """Abstract interface for a replay-memory module.

    Entries live in ``self.memory_examples``, a dict mapping an example id to
    the triple ``(input_text, truth_answers, example_id)``.  Subclasses
    implement storage, persistence and retrieval.
    """

    def __init__(self, logger):
        super().__init__()
        self.logger = logger
        self.name = "base_memory_manager"
        self.memory_examples = {}  # example_id -> (input, truth, example_id)

    def get_memory_size(self):
        """Return the number of examples currently stored."""
        return len(self.memory_examples)

    def _load_init_memory_examples(self, initial_memory_path="", formatted_examples=None, cut_off=None):
        """Populate the (empty) memory from a jsonl file or pre-formatted triples.

        Args:
            initial_memory_path: path to a jsonl file of upstream examples,
                formatted via ``self.cl_utils`` (set up by subclasses).
            formatted_examples: alternative in-memory list of
                ``(input, truth, id)`` triples.
            cut_off: if given, keep only the first ``cut_off`` examples.
                (Bug fix: this parameter used to be accepted but ignored.)
        """
        assert len(self.memory_examples) == 0
        if initial_memory_path:
            # NOTE: ``set`` de-duplicates lines but makes the ordering
            # nondeterministic, so a cut_off here is a nondeterministic subset.
            with open(initial_memory_path) as f:
                initial_memory_examples = [json.loads(line)
                                           for line in set(f.read().splitlines())]
            initial_memory_examples = self.cl_utils.upstream_data_formatter(initial_memory_examples)
        elif formatted_examples:
            initial_memory_examples = formatted_examples
        else:
            # Bug fix: previously the name was unbound (NameError) when both
            # sources were empty; now this is a no-op.
            initial_memory_examples = []
        if cut_off is not None:
            initial_memory_examples = initial_memory_examples[:cut_off]
        for item in initial_memory_examples:
            # Note that we keep all answers here; entries are keyed by id.
            self.memory_examples[item[2]] = (item[0], item[1], item[2])
        self.logger.info(f"Set up the initial memory with {len(self.memory_examples)} examples.")

    def set_up_initial_memory(self, initial_memory_path="", formatted_examples=None):
        """Build the initial memory (and any index); subclass responsibility."""
        raise NotImplementedError

    def load_memory_from_path(self, init_memory_cache_path):
        """Restore the memory from a pickle file; subclass responsibility."""
        raise NotImplementedError

    def save_memory_to_path(self, memory_pkl_path):
        """Persist the memory to a pickle file; subclass responsibility."""
        raise NotImplementedError

    def retrieve_from_memory(self, query_examples, sample_size, **kwargs):
        """Return ``sample_size`` stored examples; subclass responsibility."""
        raise NotImplementedError

    def store_examples(self, examples):
        """Add new examples to the memory; subclass responsibility."""
        raise NotImplementedError
class RandomMemoryManger(BaseMemoryManager):
    """Memory module with uniform-random retrieval (used for ER and MIR).

    NOTE: the "Manger" spelling is a historical typo kept because other
    modules import this class by name.
    """

    def __init__(self, logger):
        super().__init__(logger=logger)
        self.logger = logger
        self.name = "random_memory_manager"
        self.memory_examples = {}  # example_id -> (input, truth, example_id)

    def set_up_initial_memory(self, initial_memory_path="", formatted_examples=None, cut_off=None):
        """Populate the memory from a jsonl file or pre-formatted triples.

        Bug fix: the caller's ``cut_off`` is now forwarded instead of being
        hard-coded to None (which silently ignored it).
        """
        self._load_init_memory_examples(initial_memory_path, formatted_examples, cut_off=cut_off)

    def load_memory_from_path(self, init_memory_cache_path):
        """Restore ``memory_examples`` from a pickled cache file."""
        with open(init_memory_cache_path, "rb") as f:
            memory_cache = pickle.load(f)
            self.logger.info(f"Load the cache to {f.name}")
        self.memory_examples = memory_cache["memory_examples"]

    def save_memory_to_path(self, memory_pkl_path):
        """Persist ``memory_examples`` to a pickle file."""
        memory_cache = {}
        memory_cache["memory_examples"] = self.memory_examples
        with open(memory_pkl_path, "wb") as f:
            pickle.dump(memory_cache, f)
            self.logger.info(f"Saved the cache to {f.name}")

    def retrieve_from_memory(self, query_examples=None, sample_size=-1, **kwargs):
        """Return ``sample_size`` examples sampled uniformly at random.

        ``query_examples`` is accepted for interface compatibility but unused.
        The sample size is capped at the current memory size.
        """
        assert sample_size > 0
        sample_size = min(sample_size, self.get_memory_size())
        self.logger.info("Randomly retrieve from the memory. `query_examples` not used")
        retrieved_example_ids = random.sample(list(self.memory_examples.keys()), sample_size)
        retrieved_examples = [self.memory_examples[rid] for rid in retrieved_example_ids]
        return retrieved_examples

    def store_examples(self, examples):
        """Add ``(input, truth, id)`` triples to the memory, keyed by id."""
        for item in examples:
            # Note that we keep all answers here.
            self.memory_examples[item[2]] = (item[0], item[1], item[2])
        self.logger.info(f"Save {len(examples)} examples to the memory.")
class BartIndexManager(BaseMemoryManager):
    """Replay memory backed by a FAISS index over BART representations.

    Each stored example is embedded with ``get_bart_dual_representation``
    (encoder vector concatenated with decoder vector; ``dim_vector`` = 2*768)
    and added to an ``IndexFlatL2`` index; retrieval returns the nearest
    stored examples for a batch of query examples.
    """

    def __init__(self, logger):
        super().__init__(logger=logger)
        self.logger = logger
        self.name = "bart_index_manager"
        self.memory_index = None  # faiss.IndexFlatL2, created lazily in update_index
        self.memory_examples = {}  # example_id -> (input, truth, example_id)
        self.bart_model = None  # encoder model, set via set_up_model / load_encoder_model
        self.tokenizer = None
        self.cl_utils = ContinualFinetuning(logger=logger)  # dataloader/formatting helper
        self.data_args = None
        self.dim_vector = 2*768  # encoder vector + decoder vector
        self.memory_index_sorted_ids = []  # faiss row position -> example id

    def set_up_data_args(self, args):
        """Keep only the dataloader-related arguments from ``args``."""
        self.data_args = Namespace(
            do_lowercase=args.do_lowercase,
            append_another_bos=args.append_another_bos,
            max_input_length=args.max_input_length,
            max_output_length=args.max_output_length,
            task_name=args.task_name,
            train_batch_size=args.train_batch_size,
            predict_batch_size=args.predict_batch_size,
        )

    def set_up_initial_memory(self, initial_memory_path="", formatted_examples=None, cut_off=None):
        """Load the initial examples and index their representations.

        ``cut_off`` limits how many (id-sorted) examples are indexed.
        """
        assert self.bart_model is not None
        self._load_init_memory_examples(initial_memory_path, formatted_examples)
        # build index
        initial_memory_example_ids = sorted(list(self.memory_examples.keys()))[:cut_off]
        examples = self.get_examples_by_ids(initial_memory_example_ids)
        vectors = self.get_representation(examples)
        self.update_index(initial_memory_example_ids, vectors)

    def update_index(self, example_ids, vectors):
        """Append L2-normalized ``vectors`` to the index, keeping the
        position->id mapping in ``memory_index_sorted_ids`` in sync."""
        assert len(example_ids) == len(vectors)
        if not self.memory_index:
            self.memory_index = faiss.IndexFlatL2(self.dim_vector)
        self.memory_index_sorted_ids += example_ids
        # for ex_id in example_ids:
        #     self.memory_examples[ex_id]["memory_index_id"] = len(self.memory_index_sorted_ids)
        #     self.memory_index_sorted_ids.append(ex_id)
        vectors = np.array(vectors)
        faiss.normalize_L2(vectors)
        self.memory_index.add(vectors)

    def set_up_model(self, model, tokenizer):
        """Replace the encoder model/tokenizer used for representations."""
        del self.bart_model
        del self.tokenizer
        self.bart_model = model
        self.tokenizer = tokenizer

    def get_examples_by_ids(self, example_ids):
        """Look up stored triples for the given example ids."""
        return [self.memory_examples[eid] for eid in example_ids]

    def load_memory_from_path(self, init_memory_cache_path):
        """Restore examples, index and id mapping from a pickled cache."""
        with open(init_memory_cache_path, "rb") as f:
            memory_cache = pickle.load(f)
            self.logger.info(f"Load the cache to {f.name}")
        self.memory_index_sorted_ids = memory_cache["memory_index_sorted_ids"]
        self.memory_index = memory_cache["memory_index"]
        self.memory_examples = memory_cache["memory_examples"]

    def save_memory_to_path(self, memory_pkl_path):
        """Persist examples, index and id mapping to a pickle file."""
        memory_cache = {}
        memory_cache["memory_index_sorted_ids"] = self.memory_index_sorted_ids
        memory_cache["memory_index"] = self.memory_index
        memory_cache["memory_examples"] = self.memory_examples
        with open(memory_pkl_path, "wb") as f:
            pickle.dump(memory_cache, f)
            self.logger.info(f"Saved the cache to {f.name}")

    def search_index(self, query_vector, k=5):
        """Return the ids and L2 distances of the ``k`` nearest neighbors of
        the (normalized) query vector. Smaller distance = more similar."""
        q = np.array([query_vector])
        faiss.normalize_L2(q)
        D, I = self.memory_index.search(q, k)
        retrieved_example_ids = [self.memory_index_sorted_ids[int(eid)] for eid in I[0]]
        scores = [float(s) for s in D[0]]
        return retrieved_example_ids, scores

    def get_query_representation(self, query_examples):
        """Queries use the same representation as stored examples."""
        return self.get_representation(query_examples)

    def retrieve_from_memory(self, query_examples, sample_size, **kwargs):
        """Retrieve up to ``sample_size`` nearest stored examples.

        For each query, the top ``each_sample_size`` neighbors are collected;
        the union is sorted by ascending distance (most similar first) and
        truncated.

        NOTE(review): ``rank_method`` is read from kwargs but never used in
        this implementation — confirm whether "most_different" was intended
        to be supported here.
        """
        input_vectors = self.get_query_representation(query_examples)
        agg_method = kwargs.get("agg_method", "each_topk_then_random")
        rank_method = kwargs.get("rank_method", "most_similar")
        if agg_method == "each_topk_then_random":
            each_sample_size = kwargs.get("each_sample_size", 5)
            retrieved_example_ids = []
            retrieved_scores = []
            for query_vector in input_vectors:
                ids, scores = self.search_index(query_vector, each_sample_size)
                retrieved_example_ids += ids
                retrieved_scores += scores
            # retrieved_example_ids = set(retrieved_example_ids) # TODO: decide later.
            # retrieved_example_ids = random.sample(retrieved_example_ids, sample_size)
            # Ascending distance: most similar candidates come first.
            sorted_retrieved_example_ids = [x for _, x in sorted(zip(retrieved_scores, retrieved_example_ids), reverse=False)]
        retrieved_examples = self.get_examples_by_ids(sorted_retrieved_example_ids)
        self.logger.info(f"index_manager.retrieve_from_memory --> len(retrieved_examples)={len(retrieved_examples)}")
        return retrieved_examples[:sample_size]

    def store_examples(self, examples):
        """Store triples and extend the index with their representations."""
        example_ids = []
        for item in examples:
            self.memory_examples[item[2]] = item
            example_ids.append(item[2])
        vectors = self.get_representation(examples)
        self.update_index(example_ids, vectors)

    def get_representation(self, examples):
        """Compute the dual (encoder+decoder) BART vectors for ``examples``."""
        all_vectors = get_bart_dual_representation(cl_trainer=self.cl_utils,
                                                   bart_model=self.bart_model,
                                                   tokenizer=self.tokenizer,
                                                   data_args=self.data_args,
                                                   examples=examples,
                                                   agg_method="mean")
        return all_vectors

    def load_encoder_model(self, base_model_args):
        """Load the base BART checkpoint and use it as the index encoder."""
        self.cl_utils.load_base_model(base_model_args)
        self.set_up_model(model=self.cl_utils.base_model, tokenizer=self.cl_utils.tokenizer)
if __name__ == '__main__':
    # Ad-hoc script: build a BART-representation index over the full
    # NaturalQuestions upstream training set and cache it to disk.
    from cmr.debug_algs import run_lifelong_finetune
    parser = run_lifelong_finetune.get_cli_parser()
    args = parser.parse_args()
    debugging_alg, data_args, base_model_args, debugger_args, logger = run_lifelong_finetune.setup_args(
        args)
    args.predict_batch_size = 8
    index_manager = BartIndexManager(logger=logger)
    index_manager.set_up_data_args(args)
    index_manager.load_encoder_model(base_model_args)
    # index_manager.initial_memory_path = "exp_results/data_streams/mrqa.nq_train.memory.jsonl"
    index_manager.initial_memory_path = "data/mrqa_naturalquestions/mrqa_naturalquestions_train.jsonl"
    index_manager.set_up_initial_memory(index_manager.initial_memory_path, cut_off=None)
    index_manager.save_memory_to_path("exp_results/data_streams/bart_index.upstream_memory.full.pkl")
    # (Removed commented-out snippets that stored a handcrafted duplicate
    # example via store_examples() and ran a retrieval sanity check against
    # two known query ids; see version history if needed.)
|
CMR-main
|
cmr/debug_algs/index_based/index_manager.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from argparse import Namespace
from cmr.debug_algs.cl_utils import _keep_first_answer
from cmr.debug_algs.cl_simple_alg import ContinualFinetuning
from tqdm import tqdm
import torch
from cmr.debug_algs.index_based.index_manager import BartIndexManager, BaseMemoryManager
from cmr.debug_algs.index_based.index_utils import get_bart_dual_representation
from cmr.models.utils import trim_batch
import json
from transformers.modeling_bart import _prepare_bart_decoder_inputs
import numpy as np
import faiss
import pickle
import random
from scipy.spatial import distance
class BartIOIndexManager(BartIndexManager):
    """Memory manager with two FAISS indexes: one over the input-side BART
    vector and one over the output-side vector (768 dims each).

    Inherits storage/persistence from ``BartIndexManager``; each stored dual
    vector (2*768) is split in half across the two indexes.
    """

    def __init__(self, logger):
        super().__init__(logger=logger)
        self.logger = logger
        self.name = "bart_io_index_manager"
        self.memory_index = {"input": None, "output": None}  # built lazily
        self.dim_vector = 768  # per-side dimension (half of the dual vector)

    def set_up_data_args(self, args):
        """Keep only the dataloader-related arguments from ``args``."""
        self.data_args = Namespace(
            do_lowercase=args.do_lowercase,
            append_another_bos=args.append_another_bos,
            max_input_length=args.max_input_length,
            max_output_length=args.max_output_length,
            task_name=args.task_name,
            train_batch_size=args.train_batch_size,
            predict_batch_size=args.predict_batch_size,
        )

    def update_index(self, example_ids, vectors):
        """Split each dual vector and append the halves to the two indexes."""
        assert len(example_ids) == len(vectors)
        if not self.memory_index["input"]:
            self.memory_index["input"] = faiss.IndexFlatL2(self.dim_vector)
            self.memory_index["output"] = faiss.IndexFlatL2(self.dim_vector)
        self.memory_index_sorted_ids += example_ids
        ## add to input
        input_vectors = np.array([v[:self.dim_vector] for v in vectors])
        self.memory_index["input"].add(input_vectors)
        ## add to output
        output_vectors = np.array([v[self.dim_vector:] for v in vectors])
        self.memory_index["output"].add(output_vectors)

    def search_index(self, query_vector, k=5, partition="input", return_index_ids=False):
        """Nearest-neighbor search against one partition ("input"/"output").

        ``query_vector`` is a dual vector; the half matching ``partition`` is
        used. Returns raw faiss row ids when ``return_index_ids`` is True,
        otherwise example ids.
        """
        if partition=="input":
            query_vector = query_vector[:self.dim_vector]
        elif partition=="output":
            query_vector = query_vector[self.dim_vector:]
        D, I = self.memory_index[partition].search(np.array([query_vector]), k)
        scores = D[0]  # NOTE(review): distances are computed but never returned
        if return_index_ids:
            return I[0]
        else:
            retrieved_example_ids = [self.memory_index_sorted_ids[int(eid)] for eid in I[0]]
            return retrieved_example_ids

    def retrieve_from_memory(self, query_examples, sample_size, **kwargs):
        """Retrieve ``sample_size`` examples for a batch of queries.

        Per query: find ``each_sim_sample_size`` input-similar candidates,
        optionally re-rank them by output-side cosine *dissimilarity*
        ("most_sim_input_most_diff_output"), keep ``each_sample_size`` each,
        then randomly subsample the pooled results to ``sample_size``.

        NOTE(review): only agg_method="each_topk_then_random" is implemented;
        any other value falls through and returns None implicitly.
        """
        input_vectors = self.get_query_representation(query_examples)
        agg_method = kwargs.get("agg_method", "each_topk_then_random")
        rank_method = kwargs.get("rank_method", "most_sim_input")
        if agg_method == "each_topk_then_random":
            each_sample_size = kwargs.get("each_sample_size", 5)
            each_sim_sample_size = kwargs.get("each_sim_sample_size", 30)
            retrieved_example_ids = []
            retrieved_example_scores = []
            for query_vector in input_vectors:
                sim_input_index_ids = self.search_index(query_vector, each_sim_sample_size, partition="input", return_index_ids=True)
                if rank_method == "most_sim_input":
                    retrieved_ids = sim_input_index_ids
                elif rank_method == "most_sim_input_most_diff_output":
                    # Re-rank the input-similar pool by how different their
                    # output vectors are from the query's output vector.
                    sim_output_vectors = [self.memory_index["output"].reconstruct(int(eid)) for eid in sim_input_index_ids]
                    query_output_vector = query_vector[self.dim_vector:]
                    distances = [distance.cosine(query_output_vector, s) for s in sim_output_vectors]
                    retrieved_ids = [int(x) for _, x in sorted(zip(distances, sim_input_index_ids), reverse=True)]
                retrieved_example_ids += [self.memory_index_sorted_ids[int(eid)] for eid in retrieved_ids][:each_sample_size]
                # retrieved_example_scores += # TODO:
            self.logger.info(f"IO index -- retrieved_example_ids={len(retrieved_example_ids)}")
            retrieved_examples = self.get_examples_by_ids(retrieved_example_ids)
            retrieved_examples = random.sample(retrieved_examples, sample_size)  # TODO: consider ranking
            # retrieved_examples = retrieved_examples[:sample_size]
            return retrieved_examples
if __name__ == '__main__':
    # Ad-hoc script: build the input/output dual index over the SQuAD
    # upstream training data and cache it to disk.
    from cmr.debug_algs import run_lifelong_finetune
    parser = run_lifelong_finetune.get_cli_parser()
    args = parser.parse_args()
    debugging_alg, data_args, base_model_args, debugger_args, logger = run_lifelong_finetune.setup_args(
        args)
    # Use the upstream (SQuAD) checkpoint as the representation encoder.
    base_model_args.base_model_path = "out/mrqa_squad_bart-base_1029_upstream_model//best-model.pt"
    args.predict_batch_size = 8
    index_manager = BartIOIndexManager(logger=logger)
    index_manager.set_up_data_args(args)
    index_manager.load_encoder_model(base_model_args)
    # index_manager.initial_memory_path = "exp_results/data_streams/mrqa.nq_train.memory.jsonl"
    # index_manager.set_up_initial_memory(index_manager.initial_memory_path, cut_off=None)
    # index_manager.save_memory_to_path("exp_results/data_streams/bart_io_index.sample_init_memory.pkl")
    index_manager.initial_memory_path = "data/mrqa_squad/mrqa_squad_train.jsonl"
    index_manager.set_up_initial_memory(index_manager.initial_memory_path, cut_off=None)
    index_manager.save_memory_to_path("experiments/eval_data/qa/bart_io_index.init_memory.pkl")
    # (Removed a commented-out retrieval sanity check against a known query
    # id; see version history if needed.)
|
CMR-main
|
cmr/debug_algs/index_based/IO_each_index.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import argparse
from logging import Logger
import logging
from torch.cuda import memory
from tqdm.utils import disp_trim
from cmr.debug_algs.index_based.index_manager import BartIndexManager
import torch
from torch import Tensor, combinations, normal
import torch.nn as nn
from torch.nn import functional as F
from torch.nn import Module
import random
import glob
import os
import pickle
from tqdm import tqdm
import numpy as np
import faiss
import json
import wandb
from cmr.debug_algs.index_based.index_utils import get_bart_dual_representation
from cmr.models.run_bart import train
from cmr.models.utils import set_seeds
from faiss import normalize_L2
def load_distant_supervision(folder_name, sample_size=1, logger=None, specified_names=None, exclude_files=[], train_args=None):
    """Load distant-supervision groups (query/positive/negative vectors) from pickles.

    Args:
        folder_name: directory containing the `*.pkl` supervision files.
        sample_size: number of pkl files to sample when `specified_names` is None.
        logger: logger used for progress messages (must not be None).
        specified_names: if given, only load pkl files whose basename (without
            extension) is in this collection.
        exclude_files: pkl paths to skip (e.g. files already used for training).
        train_args: namespace with `query_only_after` / `query_only_before` /
            `query_delta` flags controlling how query vectors are sliced.

    Returns:
        (ds_items, pkl_files): the loaded groups and the list of files read.
    """
    pkl_files = glob.glob(os.path.join(folder_name, '*.pkl'))[:]
    pkl_files = [f for f in pkl_files if f not in exclude_files]
    if specified_names:
        pkl_files = [p for p in pkl_files if p.split(
            "/")[-1].replace(".pkl", "") in specified_names]
    else:
        # NOTE(review): random.choices samples WITH replacement, so the same
        # pkl file can be loaded twice — confirm random.sample isn't intended.
        pkl_files = random.choices(pkl_files, k=sample_size)
    ds_items = []
    logger.info(f"Loading {pkl_files}")
    for pkl_path in tqdm(pkl_files, desc="loading pkl files"):
        with open(pkl_path, "rb") as f:
            ds_items += pickle.load(f)
    # Query vectors are [before-model | after-model] concatenations (each half
    # of size original_dim//2); optionally keep only one half or use a delta.
    for item in ds_items:
        for q in item["query"]:
            original_dim = len(item["query"][q])
            if train_args.query_only_after:
                item["query"][q] = item["query"][q][original_dim//2:]
            if train_args.query_only_before:
                item["query"][q] = item["query"][q][:original_dim//2]
            if train_args.query_delta:
                # FIX: the two halves were sliced swapped ("before" took the
                # second half) — the first half is the before-model vector,
                # consistent with query_only_before above.
                before = item["query"][q][:original_dim//2]
                after = item["query"][q][original_dim//2:]
                item["query"][q] = before + [a - b for b, a in zip(before, after)]
    return ds_items, pkl_files
class MLP(Module):
    """Small projection head mapping encoder vectors into the retrieval space.

    With `hidden_dim > 0` this is a two-layer net (Linear -> Dropout -> ReLU ->
    Linear); otherwise it is a linear probe on batch-normalized inputs.
    """

    def __init__(self, input_dim, output_dim, hidden_dim, droprate=0):
        super().__init__()
        if hidden_dim > 0:
            stack = [
                nn.Linear(input_dim, hidden_dim),
                nn.Dropout(droprate),
                nn.ReLU(),
                nn.Linear(hidden_dim, output_dim),
            ]
        else:
            stack = [
                nn.BatchNorm1d(input_dim),
                nn.Linear(input_dim, output_dim),
            ]
        self.layers = nn.Sequential(*stack)
        self.init_weights()

    def init_weights(self):
        """BERT-style init: N(0, 0.02) weights, zero biases, identity LayerNorm."""
        for layer in self.layers:
            if isinstance(layer, (nn.Linear, nn.Embedding)):
                layer.weight.data.normal_(mean=0.0, std=0.02)
            elif isinstance(layer, nn.LayerNorm):
                layer.bias.data.zero_()
                layer.weight.data.fill_(1.0)
            if isinstance(layer, nn.Linear) and layer.bias is not None:
                layer.bias.data.zero_()

    def forward(self, X):
        """Project a (batch, input_dim) tensor to (batch, output_dim)."""
        return self.layers(X)
def create_batch_from_groups(groups, qry_size=1, pos_size=1, neg_size=1, seen_query_ids=None, seen_memory_ids=None, query_mean=True):
    """Assemble one contrastive batch from distant-supervision groups.

    For each group, samples `qry_size` query vectors (averaged into one when
    `query_mean`), `pos_size` positives and `neg_size` negatives. Candidates of
    all groups are concatenated, and `targets` holds the index (or index list
    when `pos_size > 1`) of each query's positives within that concatenation.

    Args:
        groups: list of dicts with "query"/"positive"/"negative" id->vector maps.
        qry_size, pos_size, neg_size: per-group sample counts.
        seen_query_ids, seen_memory_ids: optional sets updated in place with the
            ids observed (for coverage bookkeeping).
        query_mean: average the sampled query vectors into a single query.

    Returns:
        (queries, candidates, targets) — numpy arrays, except `targets` stays a
        list of lists when `pos_size > 1` (ragged).
    """
    qry_sample_size = qry_size  # identical in both modes; hoisted out of the old if/else
    if query_mean:
        qry_size = 1  # the sampled query vectors collapse into one effective query
    queries, candidates, targets = [], [], []
    for group in groups:
        # TODO: this is overly recorded..
        if seen_query_ids is not None:
            seen_query_ids.update(group["query"].keys())
        if seen_memory_ids is not None:
            seen_memory_ids.update(group["positive"].keys())
            seen_memory_ids.update(group["negative"].keys())
        selected_queries = random.choices(list(group["query"].values()), k=qry_sample_size)
        if query_mean:
            queries.append(np.mean(np.array(selected_queries), axis=0))
        else:
            queries += selected_queries
        target = len(candidates)  # offset of this group's positives in `candidates`
        candidates += random.choices(list(group["positive"].values()), k=pos_size)
        candidates += random.choices(list(group["negative"].values()), k=neg_size)
        if pos_size > 1:
            targets += [list(range(target, target + pos_size))] * qry_size  # N*C
        elif pos_size == 1:
            targets += [target] * qry_size  # N*1
    assert len(queries) == len(targets) == len(groups) * qry_size
    assert len(candidates) == len(groups) * (pos_size + neg_size)
    if pos_size > 1:
        # Ragged target lists cannot form a rectangular array.
        return np.array(queries), np.array(candidates), targets
    return np.array(queries), np.array(candidates), np.array(targets)
class BiEncoderIndexManager(BartIndexManager):
    """Memory index whose vectors come from a trained bi-encoder: two MLP heads
    project frozen BART query/memory representations into a shared space for
    nearest-neighbor retrieval (FAISS)."""

    def __init__(self, logger):
        super().__init__(logger=logger)
        self.logger = logger
        self.name = "biencoder_index_manager"
        # Defaults; overwritten from self.train_args in init_biencoder_modules().
        self.query_input_dim = 768*2*2   # query = [before-model | after-model] reps
        self.memory_input_dim = 768*2
        self.hidden_dim = 512
        self.dim_vector = 256  # final dim
        self.memory_encoder = None
        self.query_encoder = None
        self.train_args = None
        # cl: model snapshots used by get_query_representation (before/after an update)
        self.before_model = None
        self.after_model = None

    def load_encoder_model(self, base_model_args, memory_encoder_path, query_encoder_path):
        """Load the BART encoder plus the two trained bi-encoder heads from disk."""
        super().load_encoder_model(base_model_args)
        if self.memory_encoder is None:
            self.init_biencoder_modules()
        self.memory_encoder.load_state_dict(torch.load(memory_encoder_path))
        self.query_encoder.load_state_dict(torch.load(query_encoder_path))
        self.logger.info(f"Loading bi-encoders.memory_encoder from {memory_encoder_path}")
        self.logger.info(f"Loading bi-encoders.query_encoder from {query_encoder_path}")

    def init_biencoder_modules(self):
        """Build the query/memory MLP heads with dimensions from self.train_args."""
        self.query_input_dim = self.train_args.query_input_dim
        self.memory_input_dim = self.train_args.memory_input_dim
        self.hidden_dim = self.train_args.hidden_dim
        self.dim_vector = self.train_args.dim_vector
        self.memory_encoder = MLP(self.memory_input_dim, self.dim_vector,
                                  self.hidden_dim, droprate=self.train_args.droprate)
        self.query_encoder = MLP(self.query_input_dim, self.dim_vector,
                                 self.hidden_dim, droprate=self.train_args.droprate)

    def get_representation(self, examples):
        """only for the memory encoding here"""
        bart_reps = super().get_representation(examples)
        bart_reps = np.array(bart_reps)
        self.memory_encoder.eval()
        all_vectors = self.memory_encoder(torch.Tensor(bart_reps)).detach().numpy()
        return all_vectors

    def train_biencoder(self, train_data, eval_data):
        """Contrastively train the two heads so each query scores its positive
        memory examples above sampled negatives; evaluates (top-k acc) every
        `eval_per_steps` steps with early stopping on `patience`."""
        trainable_params = list(self.query_encoder.parameters()) + \
            list(self.memory_encoder.parameters())

        def count_parameters(model):
            return sum(p.numel() for p in model.parameters() if p.requires_grad)
        self.logger.info(f"# params of query_encoder = {count_parameters(self.query_encoder)}")
        self.logger.info(f"# params of memory_encoder = {count_parameters(self.memory_encoder)}")
        optimizer = torch.optim.Adam(trainable_params, lr=self.train_args.lr)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1000, gamma=0.99, last_epoch=-1)
        gradient_acc_steps = 1  # NOTE(review): declared but never used below
        if self.train_args.use_cuda:
            self.query_encoder.to(torch.device("cuda"))
            self.memory_encoder.to(torch.device("cuda"))
        seen_query_ids = set()
        seen_memory_ids = set()
        eval_at_K = 16
        # Baseline validation accuracy before any training step.
        best_eval_acc = self.eval_func_v1(
            eval_data, k=eval_at_K, seen_query_ids=seen_query_ids, seen_memory_ids=seen_memory_ids)
        self.logger.info(f"Valid Acc @ 0: Top-{eval_at_K} acc: {best_eval_acc}")
        if self.train_args.wandb:
            wandb.log({"valid_accuracy": best_eval_acc}, step=0)
            wandb.log({"best_valid_acc": best_eval_acc}, step=0)
        losses = []
        no_up = 0  # evaluations since the last improvement (for early stopping)
        for _step in tqdm(range(self.train_args.n_steps), desc="Training steps"):
            self.memory_encoder.train()
            self.query_encoder.train()
            sampled_groups = random.choices(train_data, k=self.train_args.batch_size)
            queries, candidates, targets = create_batch_from_groups(
                sampled_groups,
                qry_size=self.train_args.qry_size,
                pos_size=self.train_args.pos_size,
                neg_size=self.train_args.neg_size,
                seen_query_ids=seen_query_ids, seen_memory_ids=seen_memory_ids,
                query_mean=self.train_args.use_query_mean)
            optimizer.zero_grad()
            qry_tensors = torch.Tensor(queries)
            mem_tensors = torch.Tensor(candidates)
            if self.train_args.use_cuda:
                qry_tensors = qry_tensors.to(torch.device("cuda"))
                mem_tensors = mem_tensors.to(torch.device("cuda"))
            query_inputs = self.query_encoder(qry_tensors)
            memory_inputs = self.memory_encoder(mem_tensors)
            # Dot-product scores between every query and every candidate.
            scores = torch.matmul(query_inputs, memory_inputs.transpose(0, 1))
            if self.train_args.pos_size == 1:
                # Single positive per group: softmax cross-entropy over candidates.
                tgt_tensors = torch.LongTensor(targets)
                if self.train_args.use_cuda:
                    tgt_tensors = tgt_tensors.to(torch.device("cuda"))
                loss = F.cross_entropy(scores, tgt_tensors, reduction="mean")
            elif self.train_args.pos_size > 1:
                # Multiple positives per group: multi-hot targets with BCE.
                multi_hot_targets = []
                for target in targets:
                    labels = torch.LongTensor(target)
                    labels = labels.unsqueeze(0)
                    multi_hot_targets.append(torch.zeros(labels.size(0), len(candidates)).scatter_(1, labels, 1.))
                multi_hot_targets = torch.stack(multi_hot_targets, dim=1)
                multi_hot_targets = multi_hot_targets.view(scores.size())
                tgt_tensors = torch.Tensor(multi_hot_targets)
                criterion = torch.nn.BCEWithLogitsLoss(reduction="mean")
                if self.train_args.use_cuda:
                    tgt_tensors = tgt_tensors.to(torch.device("cuda"))
                loss = criterion(scores, tgt_tensors)
            losses.append(loss.item())
            loss.backward()
            if self.train_args.wandb:
                wandb.log({"lr": float(optimizer.param_groups[0]['lr'])}, step=_step)
                wandb.log({"loss": float(loss)}, step=_step)
                wandb.log({"avg_loss": float(sum(losses)/len(losses))}, step=_step)
            # clip
            torch.nn.utils.clip_grad_norm_(trainable_params, 1.0)
            optimizer.step()
            scheduler.step()
            if _step > 0 and _step % self.train_args.eval_per_steps == 0:
                self.logger.info(f"---- Completed epoch with avg training loss {sum(losses)/len(losses)}.")
                train_acc = self.eval_func_v1(train_data[:], k=eval_at_K)
                self.logger.info(
                    f"Train Acc: Top-{eval_at_K} acc @ {_step}: {train_acc} | ")
                valid_acc = self.eval_func_v1(eval_data, k=eval_at_K, seen_query_ids=seen_query_ids, seen_memory_ids=seen_memory_ids)
                best_eval_acc = max(best_eval_acc, valid_acc)
                if self.train_args.wandb:
                    wandb.log({"train_accuracy": train_acc}, step=_step)
                    wandb.log({"valid_accuracy": valid_acc}, step=_step)
                    wandb.log({"best_valid_acc": best_eval_acc}, step=_step)
                self.logger.info(
                    f"Valid ACc: Top-{eval_at_K} acc @ {_step}: {valid_acc} | best_eval_acc={best_eval_acc}")
                if best_eval_acc == valid_acc:
                    self.logger.info("new record; saving the biencoder ckpts.")
                    no_up = 0
                elif best_eval_acc > valid_acc:
                    no_up += 1
                    if no_up >= self.train_args.patience:
                        break
                if self.train_args.save_ckpt:
                    self.save_biencoder()

    def eval_func_v2(self, eval_data, k=None, seen_query_ids=None, seen_memory_ids=None, filter=False):
        """Pairwise ranking accuracy: for each query, the fraction of
        (positive, negative) candidate pairs where the positive scores higher."""
        # based on pair-wise comparisions
        self.query_encoder.eval()
        self.memory_encoder.eval()
        eval_scores = []
        for group in eval_data:
            queries, candidates, targets = create_batch_from_groups([group], qry_size=16, pos_size=8, neg_size=8)
            qry_tensors = torch.Tensor(queries)
            mem_tensors = torch.Tensor(candidates)
            if self.train_args.use_cuda:
                qry_tensors = qry_tensors.to(torch.device("cuda"))
                mem_tensors = mem_tensors.to(torch.device("cuda"))
            query_inputs = self.query_encoder(qry_tensors)
            memory_inputs = self.memory_encoder(mem_tensors)
            scores = torch.matmul(query_inputs, memory_inputs.transpose(0, 1))
            querywise_scores = []
            for qid in range(len(queries)):
                pairwise_comp = []
                pos_start = 0  # always 0: positives come first in `candidates`
                pos_end = pos_start + 8
                neg_start = pos_end
                neg_end = neg_start + 8
                for pos_ind in range(pos_start, pos_end):
                    for neg_ind in range(neg_start, neg_end):
                        score_pos = scores[qid][pos_ind]
                        score_neg = scores[qid][neg_ind]
                        pairwise_comp.append(int(score_pos > score_neg))
                pairwise_score = np.mean(pairwise_comp)
                querywise_scores.append(pairwise_score)
            group_score = np.mean(querywise_scores)
            eval_scores.append(group_score)
        return np.mean(eval_scores)

    def eval_func_v1(self, eval_data, k=5, seen_query_ids=None, seen_memory_ids=None, filter=False):
        """Top-k retrieval accuracy: encode queries and candidates, retrieve the
        k nearest candidates per query with FAISS, and score the fraction of
        retrieved ids that are true positives."""
        top_k_accs = []
        tested_query_ids = set()
        tested_memory_ids = set()
        for item in eval_data:
            query_vectors = []
            query_ids = []
            for qry_id, qry_vec in item["query"].items():
                if filter and seen_query_ids is not None and qry_id in seen_query_ids:
                    # Remove the seen qry ids
                    continue
                query_ids.append(qry_id)
                query_vectors.append(qry_vec)
            if len(query_ids) == 0:
                continue
            tested_query_ids.update(query_ids)
            positive_ids = set()
            all_candidaites = []
            all_candidate_vectors = []
            for ex_id, vector in item["positive"].items():
                positive_ids.add(ex_id)
            memory_items = list(item["negative"].items()) + list(item["positive"].items())
            random.shuffle(memory_items)  # to avoid the case where they have the same scores
            for ex_id, vector in memory_items:
                if filter and seen_memory_ids is not None and ex_id in seen_memory_ids:
                    # Remove the seen memory ids
                    continue
                # FIX: each candidate used to be appended twice (once before the
                # seen-filter and once after it), inflating the candidate pool
                # with duplicates and skewing top-k accuracy; append exactly once.
                all_candidaites.append(ex_id)
                tested_memory_ids.add(ex_id)
                all_candidate_vectors.append(vector)
            query_vectors = np.array(query_vectors)
            all_candidate_vectors = np.array(all_candidate_vectors)
            self.query_encoder.eval()
            self.memory_encoder.eval()
            q_inputs = torch.Tensor(query_vectors)
            m_inputs = torch.Tensor(all_candidate_vectors)
            if self.train_args.use_cuda:
                q_inputs = q_inputs.to(torch.device("cuda"))
                m_inputs = m_inputs.to(torch.device("cuda"))
            q = self.query_encoder(q_inputs).detach().cpu().numpy()
            m = self.memory_encoder(m_inputs).detach().cpu().numpy()
            memory_index = faiss.IndexFlatL2(m.shape[1])
            memory_index.add(m)
            Ds, Is = memory_index.search(q, k)
            for index_list in Is:
                retrieved_top_ids = [all_candidaites[ind] for ind in index_list]
                top_k_accs.append(len([x for x in retrieved_top_ids if x in positive_ids])/k)
            del memory_index
        # Coverage diagnostics: how many tested ids were seen during training.
        if seen_query_ids is not None:
            coverage = len(tested_query_ids & seen_query_ids)/len(tested_query_ids)
            self.logger.info(f"#tested_query_ids={len(tested_query_ids)}; coverage={coverage}")
        if seen_memory_ids is not None:
            coverage = len(tested_memory_ids & seen_memory_ids)/len(tested_memory_ids)
            self.logger.info(f"#tested_memory_ids={len(tested_memory_ids)}; coverage={coverage}")
        return np.mean(top_k_accs)

    def save_biencoder(self, query_encoder_path=None, memory_encoder_path=None):
        """Persist both heads (CPU state dicts); paths default to train_args."""
        if not query_encoder_path:
            query_encoder_path = self.train_args.query_encoder_path
        if not memory_encoder_path:
            memory_encoder_path = self.train_args.memory_encoder_path

        def save_module(module, path):
            model_state_dict = {k: v.cpu() for (
                k, v) in module.state_dict().items()}
            torch.save(model_state_dict, path)
            self.logger.info(f"Model saved to {path}.")
        save_module(self.query_encoder, query_encoder_path)
        save_module(self.memory_encoder, memory_encoder_path)

    def get_query_representation(self, query_examples):
        """Using the concatenation"""
        # Embed with the before-update and after-update model snapshots, then
        # concatenate [before | after] and project through the query head.
        before_all_vectors = get_bart_dual_representation(cl_trainer=self.cl_utils,
                                                          bart_model=self.before_model,
                                                          tokenizer=self.tokenizer,
                                                          data_args=self.data_args,
                                                          examples=query_examples)
        after_all_vectors = get_bart_dual_representation(cl_trainer=self.cl_utils,
                                                         bart_model=self.after_model,
                                                         tokenizer=self.tokenizer,
                                                         data_args=self.data_args,
                                                         examples=query_examples)
        bart_reps = []
        for b, a in zip(before_all_vectors, after_all_vectors):
            bart_reps.append(list(b)+list(a))
        bart_reps = np.array(bart_reps)
        self.query_encoder.eval()
        all_vectors = self.query_encoder(torch.Tensor(bart_reps)).detach().numpy()
        return all_vectors
def get_parser():
    """Build the CLI parser for bi-encoder training / indexing runs."""
    def str2bool(value):
        # "true"/"1"/"yes" (any case) parse as True; everything else is False.
        return str(value).lower() in ['true', '1', 'yes']

    parser = argparse.ArgumentParser()
    # Distant-supervision data and output paths ($prefix is replaced at runtime).
    parser.add_argument("--ds_dir_path",
                        default="exp_results/supervision_data/1020_dm_simple/")
    parser.add_argument("--num_ds_train_file", type=int, default=24)
    parser.add_argument("--num_ds_dev_file", type=int, default=8)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--run_mode", type=str, default="train")  # TODO:
    parser.add_argument("--query_encoder_path", type=str,
                        default="exp_results/supervision_data/$prefix.qry_encoder.pt")
    parser.add_argument("--memory_encoder_path", type=str,
                        default="exp_results/supervision_data/$prefix.mem_encoder.pt")
    parser.add_argument("--memory_index_path", type=str,
                        default="exp_results/supervision_data/$prefix.memory.index")
    parser.add_argument("--train_args_path", type=str,
                        default="exp_results/supervision_data/$prefix.train_args.json")
    # train_args: model dimensions.
    parser.add_argument("--query_input_dim", type=int, default=768*2*2)
    parser.add_argument("--memory_input_dim", type=int, default=768*2)
    parser.add_argument("--hidden_dim", type=int, default=-1)  # -1 means no hidden layer; 256 for example
    parser.add_argument("--dim_vector", type=int, default=128)
    # train_args: optimization.
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--n_steps", type=int, default=8000)
    parser.add_argument("--eval_per_steps", type=int, default=100)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--qry_size", type=int, default=8)    # 1-16
    parser.add_argument("--pos_size", type=int, default=16)   # 1-8
    parser.add_argument("--neg_size", type=int, default=1)    # 1-8
    parser.add_argument("--patience", type=int, default=8)
    parser.add_argument("--droprate", type=float, default=0)
    # Boolean-ish flags parsed via str2bool.
    parser.add_argument('--use_query_mean', default=True, type=str2bool)
    parser.add_argument('--run_name', default="1020_dm_simple", type=str)
    parser.add_argument('--save_ckpt', default=False, type=str2bool)
    parser.add_argument('--use_cuda', default=True, type=str2bool)
    parser.add_argument('--wandb', default=False, type=str2bool)
    # Query-vector slicing options (see load_distant_supervision).
    parser.add_argument('--query_only_after', default=False, type=str2bool)
    parser.add_argument('--query_only_before', default=False, type=str2bool)
    parser.add_argument('--query_delta', default=False, type=str2bool)
    return parser
if __name__ == '__main__':
    # Entry point: either train the bi-encoder heads ("train") or build a
    # memory index with previously trained heads ("index").
    biencoder_args = get_parser().parse_args()
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO)
    logger = logging.getLogger(__name__)
    if biencoder_args.wandb and biencoder_args.run_mode == "train":
        run = wandb.init(reinit=True, project="FAIR_Biencoder", settings=wandb.Settings(start_method="fork"))
        run_name = wandb.run.name
        biencoder_args.run_name = run_name
        logger.info(f"run_name = {run_name}")
    # Resolve output paths against the (possibly wandb-generated) run name.
    biencoder_args.query_encoder_path = biencoder_args.query_encoder_path.replace("$prefix", biencoder_args.run_name)
    biencoder_args.memory_encoder_path = biencoder_args.memory_encoder_path.replace("$prefix", biencoder_args.run_name)
    biencoder_args.memory_index_path = biencoder_args.memory_index_path.replace("$prefix", biencoder_args.run_name)
    biencoder_args.train_args_path = biencoder_args.train_args_path.replace("$prefix", biencoder_args.run_name)
    set_seeds(biencoder_args.seed)
    if biencoder_args.run_mode == "train":
        # Persist the exact training configuration next to the checkpoints.
        with open(biencoder_args.train_args_path, "w") as f:
            json.dump(vars(biencoder_args), f)
        if biencoder_args.wandb:
            wandb.config.update(biencoder_args)
        if biencoder_args.query_only_after or biencoder_args.query_only_before:
            # or biencoder_args.query_delta
            # Half-vector query modes shrink the query encoder's input dim.
            biencoder_args.query_input_dim = biencoder_args.query_input_dim // 2
        train_data, train_files = load_distant_supervision(
            biencoder_args.ds_dir_path, sample_size=biencoder_args.num_ds_train_file, logger=logger, train_args=biencoder_args)
        logger.info(f"num_groups = {len(train_data)}")
        # Dev files are drawn from the same folder, excluding the train files.
        eval_data, eval_files = load_distant_supervision(
            biencoder_args.ds_dir_path, sample_size=biencoder_args.num_ds_dev_file, logger=logger, exclude_files=train_files, train_args=biencoder_args)
        biencoder_memory_module = BiEncoderIndexManager(logger)
        biencoder_memory_module.train_args = biencoder_args
        biencoder_memory_module.init_biencoder_modules()
        biencoder_memory_module.train_biencoder(train_data, eval_data)
        if biencoder_args.save_ckpt:
            biencoder_memory_module.save_biencoder(
                biencoder_args.query_encoder_path, biencoder_args.memory_encoder_path)
        # FIX: `run` is only bound when --wandb is enabled; calling
        # run.finish() unconditionally raised NameError for non-wandb runs.
        if biencoder_args.wandb:
            run.finish()
    elif biencoder_args.run_mode == "index":
        # Restore the model dimensions that the saved heads were trained with.
        with open(biencoder_args.train_args_path, "r") as f:
            backup_args = json.load(f)
        biencoder_args.hidden_dim = backup_args["hidden_dim"]
        biencoder_args.query_input_dim = backup_args["query_input_dim"]
        biencoder_args.memory_input_dim = backup_args["memory_input_dim"]
        biencoder_args.hidden_dim = backup_args["hidden_dim"]
        biencoder_args.dim_vector = backup_args["dim_vector"]
        biencoder_args.use_query_mean = backup_args["use_query_mean"]
        from cmr.debug_algs import run_lifelong_finetune
        parser = run_lifelong_finetune.get_cli_parser()
        cl_args = parser.parse_args("")
        debugging_alg, data_args, base_model_args, debugger_args, logger = run_lifelong_finetune.setup_args(
            cl_args)
        cl_args.predict_batch_size = 8
        index_manager = BiEncoderIndexManager(logger)
        index_manager.train_args = biencoder_args
        index_manager.set_up_data_args(cl_args)
        index_manager.load_encoder_model(
            base_model_args, biencoder_args.memory_encoder_path, biencoder_args.query_encoder_path)
        # Embed the NQ training set as the initial memory and persist it.
        index_manager.initial_memory_path = "data/mrqa_naturalquestions/mrqa_naturalquestions_train.jsonl"
        index_manager.set_up_initial_memory(index_manager.initial_memory_path)
        index_manager.save_memory_to_path("exp_results/data_streams/1021_biencoder_init_memory.pkl")
|
CMR-main
|
cmr/debug_algs/index_based/biencoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import altair as alt
from altair.vegalite.v4.schema.core import Axis, Legend
def draw_curve(df, y_scale=[0, 1], fig_title="", y_title="Y Title", x_key="timecode", y_key="em:Q", height=800, width=1150, x_scale=[0, 100], color_dom=None, color_range=None, orient="top-right"):
    """Draw an Altair point+line curve of `y_key` over `x_key`, one series per
    `prefix` value; `orient` places the legend ("none" hides legend styling)."""
    # Keep only rows whose timecode falls inside the requested x window.
    df = df[(df["timecode"] <= x_scale[1]) & (df["timecode"] >= x_scale[0])]
    x = alt.X(x_key, type="ordinal", title="", axis=alt.Axis(tickCount=10, grid=False))
    if color_dom and color_range:
        # Fixed color/shape assignment so series keep colors across figures.
        color=alt.Color('prefix:N', scale=alt.Scale(domain=color_dom, range=color_range), sort=color_dom)
        color_wo_lengend = alt.Color('prefix:N', scale=alt.Scale(domain=color_dom, range=color_range), sort=color_dom, legend=None)
        shape=alt.Shape('prefix:N', sort=color_dom)
        # NOTE(review): chained assignment — this line also REBINDS `shape` to
        # the legend-less encoding, so the chart is drawn without a shape
        # legend; `shape_wo_legend` itself is never used. Confirm intent.
        shape_wo_legend = shape=alt.Shape('prefix:N', sort=color_dom, legend=None)
    else:
        color=alt.Color('prefix:N', )
        color_wo_lengend = alt.Color('prefix:N', legend=None)
        shape=alt.Shape('prefix:N', )
        # NOTE(review): same chained-assignment clobbering as above.
        shape_wo_legend = shape=alt.Shape('prefix:N', legend=None)
    # scale=alt.Scale(range=['cross', 'circle', 'square', 'triangle-right', 'diamond'])
    y=alt.Y(y_key, stack=None, title="", scale=alt.Scale(domain=y_scale), axis=alt.Axis(tickCount=10, grid=False))
    points = alt.Chart(df).mark_point(opacity=0.8, filled=True, size=350).encode(x=x, y=y, shape=shape ,color=color).properties(title=fig_title)
    lines = alt.Chart(df).mark_line(point=False).encode(x=x, y=y, color=color).properties(title=fig_title)
    # Layer points over lines; independent scales keep color/shape legends separate.
    fig = alt.layer(points, lines).resolve_scale(color="independent", shape="independent")
    # fig = points
    fig = fig.properties(width=width, height=height).configure_axis(
        labelFontSize=30,
        titleFontSize=30,
    ).configure_view(stroke="black", strokeWidth=3)
    if orient != "none":
        fig = fig.configure_legend(titleFontSize=0, labelFontSize=30, symbolSize=300, orient=orient, strokeColor='gray',
                                   fillColor='#EEEEEE',
                                   padding=5,
                                   cornerRadius=3,).configure_title(
            fontSize=30,
            font='Courier',
            anchor='middle',
            orient="top", align="center",
            color='black'
        )
    return fig
def draw_stacked_bars(df, y_scale=[0, 1], fig_title="", y_title="Y Title", x_key="timecode", y_key="em:Q", height=800, width=1150, x_scale=[0, 100], bin_width=10, color_dom=None, color_range=None):
    """Draw an Altair stacked-bar chart of `y_key` over time steps, one bar
    segment per `prefix` value; optional fixed color mapping via
    `color_dom`/`color_range`."""
    if color_dom and color_range:
        color=alt.Color('prefix:N', scale=alt.Scale(domain=color_dom, range=color_range))
    else:
        color=alt.Color('prefix:N')
    # (removed unused leftovers `dom`/`rng` that were never referenced)
    fig = alt.Chart(df).mark_bar().encode(x=alt.X(x_key, title="Time Step", axis=alt.Axis(tickMinStep=10, tickOffset=0, tickWidth=5,), scale=alt.Scale(domain=x_scale)),
                                          y=alt.Y(y_key, title=y_title, scale=alt.Scale(domain=y_scale)),
                                          color=color,).properties(title=fig_title)
    fig = alt.layer(fig).resolve_scale()
    fig = fig.properties(width=width, height=height).configure_title(fontSize=50,
    ).configure_bar(binSpacing=0, width=bin_width).configure_axis(
        labelFontSize=25,
        titleFontSize=25,
    ).configure_legend(titleFontSize=0, labelFontSize=30, orient='top-left', strokeColor='gray',
                       fillColor='#EEEEEE',
                       padding=5,
                       cornerRadius=3,).configure_title(
        fontSize=30,
        font='Courier',
        anchor='middle',
        orient="top", align="center",
        color='black'
    )
    return fig
def draw_grouped_bars(df, y_scale=[0, 1], fig_title="", y_title="Y Title", x_key="timecode", group_key="", y_key="em:Q", height=800, width=1150, x_scale=[0, 100], bin_width=10, color_dom=None, color_range=None, orient = "none"):
    """Draw an Altair bar chart of `y_key` per CL method (`x_key`); optional
    fixed color mapping and legend placement (`orient="none"` hides it)."""
    if color_dom and color_range:
        color=alt.Color(x_key, scale=alt.Scale(domain=color_dom, range=color_range), sort=color_dom, legend=None)
    else:
        color=alt.Color(x_key)
    bars = alt.Chart(df).mark_bar(clip=True).encode(x=alt.X(x_key, title="CL Method", sort=color_dom),
                                                    y=alt.Y(y_key, title=y_title, scale=alt.Scale(domain=y_scale), axis=alt.Axis(grid=False)),
                                                    color=color).properties(title=fig_title)
    # (removed a dead `text` mark_text layer that was built but never added to the figure)
    fig = bars
    fig = fig.properties(width=width, height=height).configure_title(fontSize=0,
    ).configure_bar(binSpacing=0, width=bin_width).configure_axis(
        labelFontSize=10,
        titleFontSize=10,
    )
    if orient != "none":
        fig = fig.configure_legend(titleFontSize=0, labelFontSize=30, orient='top-left', strokeColor='gray',
                                   fillColor='#EEEEEE',
                                   padding=5,
                                   cornerRadius=3,)
    fig = fig.configure_title(
        fontSize=10,
        font='Courier',
        anchor='middle',
        orient="top", align="center",
        color='black'
    )
    return fig
|
CMR-main
|
cmr/notebooks/draw_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from enum import unique
from posixpath import split
from re import L
import datasets
import numpy as np
import os
import gzip
import sys
import json
def show_statistics(lines):
    """Print min/max/mean/median whitespace-token counts of the "input" fields
    of the given serialized-JSON lines."""
    lengths = [len(json.loads(line)["input"].split()) for line in lines]
    print(np.min(lengths), np.max(lengths),
          np.mean(lengths), np.median(lengths))
    return
def escape(s):
    """Strip layout markup tokens and tabs/newlines from *s*, then trim."""
    # TODO: remove the markups
    markups = ("</P>", "<P>", "<Li>", "</Li>", "<Ul>", "</Ul>",
               "[DOC]", "[TLE]", "[PAR]", "[SEP]", "\n", "\t")
    for token in markups:
        s = s.replace(token, "")
    return s.strip()
# Filtering the bad examples.
def example_pass(context):
    """Return False for contexts containing HTML table markup, else True."""
    lowered = context.lower()
    return "<table>" not in lowered and "<td>" not in lowered
def add_qmark(s):
    """Ensure *s* ends with a question mark (appends " ?" when missing)."""
    if s.endswith("?"):
        return s
    return s + " ?"
def write_to_jsonl(lst, out_file):
    """Write the already-serialized JSON lines in *lst* to *out_file*,
    one record per line (no trailing newline)."""
    with open(out_file, "w") as fout:
        fout.write("\n".join(lst))
def deduplicate(lines):
    """Drop serialized-JSON lines whose "input" field was already seen,
    keeping the first occurrence; prints before/after counts."""
    seen_inputs = set()
    unique_lines = []
    for line in lines:
        entry = json.loads(line)
        if entry["input"] in seen_inputs:
            continue
        seen_inputs.add(entry["input"])
        unique_lines.append(line)
    print("deduplicate", len(lines), len(unique_lines))
    return unique_lines
class TextToTextDataset():
    """Base class for text-to-text dataset exporters.

    Subclasses provide `load_dataset()`, `map_to_list()` and a
    `task_identifier`; this class deduplicates each split and writes
    train/dev/test jsonl files under `path/<task_identifier>/`.
    """

    def get_all_lines(self, dataset):
        """Return deduplicated (train, validation, test) line lists and print
        token statistics for train/validation."""
        train_lines, val_lines, test_lines = (
            deduplicate(self.map_to_list(dataset, split))
            for split in ("train", "validation", "test"))
        show_statistics(train_lines)
        show_statistics(val_lines)
        return train_lines, val_lines, test_lines

    def write_dataset(self, path):
        """
        return train, dev, test
        """
        dataset = self.load_dataset()
        train_lines, val_lines, test_lines = self.get_all_lines(dataset)
        out_dir = os.path.join(path, self.task_identifier)
        os.makedirs(out_dir, exist_ok=True)
        prefix = os.path.join(out_dir, self.task_identifier)
        write_to_jsonl(train_lines, prefix + "_train.jsonl")
        write_to_jsonl(val_lines, prefix + "_dev.jsonl")
        if test_lines:
            # Some sources (e.g. MRQA) have no test split; skip the empty file.
            write_to_jsonl(test_lines, prefix + "_test.jsonl")
class MRQA(TextToTextDataset):
    """MRQA-format QA subsets (e.g. SQuAD) exported as text-to-text jsonl."""

    def __init__(self, task_identifier="mrqa", subset="SQuAD", mrqa_path="data/mrqa"):
        self.task_identifier = task_identifier + "_" + subset.lower()
        self.mrqa_path = mrqa_path
        self.subset = subset

    def map_to_list(self, dataset, split_name):
        """Convert one split into serialized {"id", "input", "output"} lines,
        skipping answer-less examples and table-like contexts."""
        if split_name not in dataset:
            print("No such split_name:", split_name)
            return []
        lines = []
        for datapoint in dataset[split_name]:
            if not datapoint["answers"]:
                print("empty answer")
                continue
            if not example_pass(datapoint["context"]):
                continue
            # TODO: need re-training
            _input = f'Question: {add_qmark(escape(datapoint["question"]))} </s> Context: {escape(datapoint["context"])}'
            _output = [escape(a) for a in datapoint["answers"]]
            _id = f"{self.task_identifier}-{split_name}-{len(lines)}"
            lines.append(json.dumps({"id": _id, "input": _input, "output": _output}))
        print("Three examples: \n" + "\n".join([str(_) for _ in lines[:3]]))
        return lines

    def load_dataset(self):
        """Read the gzipped MRQA train/dev files for this subset into
        {"train": [...], "validation": [...]} lists of plain dicts."""
        def load_jsonl_gz(gzpath):
            data = []
            with gzip.open(gzpath, 'rb') as myzip:
                for example in myzip:
                    json_line = json.loads(example)
                    if "header" in json_line:
                        # First record is a metadata header, not an example.
                        print(json_line["header"])
                        continue
                    context = json_line["context"]
                    data.extend(
                        dict(context=context,
                             qid=item["qid"],
                             question=item["question"],
                             answers=list(set(item["answers"])))
                        for item in json_line["qas"])
            return data

        return {
            "train": load_jsonl_gz(os.path.join(self.mrqa_path, "mrqa_train", self.subset + ".jsonl.gz")),
            "validation": load_jsonl_gz(os.path.join(self.mrqa_path, "mrqa_dev", self.subset + ".jsonl.gz")),
        }
class NLI(TextToTextDataset):
    """Natural-language-inference datasets (snli/anli/multi_nli/qnli/scitail)
    exported as text-to-text jsonl."""

    def __init__(self, task_identifier="snli"):
        self.task_identifier = task_identifier
        # for classification tasks, specify the meaning of each label
        self.prompt = " "  # are two sentences entailment or not entailment?
        if self.task_identifier in ["snli", "anli", "multi_nli"]:
            self.label = {
                0: ["entailment"],
                1: ["neutral"],
                2: ["contradiction"]
            }
        elif self.task_identifier == "qnli":
            # NOTE(review): GLUE QNLI's label 1 is "not_entailment"; mapping it
            # to "neutral" here looks suspicious — confirm this is intended.
            self.label = {
                0: ["entailment"],
                1: ["neutral"],
            }
        elif self.task_identifier == "scitail":
            self.label = {
                "entails": ["entailment"],
                "neutral": ["neutral"],
            }

    def get_all_lines(self, dataset, splits=["train", "validation", "test"]):
        # Deduplicate every requested split and print its token statistics.
        all_lines = {}
        for split in splits:
            all_lines[split] = deduplicate(self.map_to_list(dataset, split))
            show_statistics(all_lines[split])
        # TODO: de-duplicate the lines!
        return all_lines

    def write_dataset(self, path):
        """
        return train, dev, test
        """
        # load dataset
        dataset = self.load_dataset()
        # formulate into list (for consistency in np.random)
        # Each task family ships different split names.
        if self.task_identifier in ["snli", "scitail", "qnli"]:
            splits = ["train", "validation", "test"]
        elif self.task_identifier == "anli":
            splits = ['train_r1', 'dev_r1', 'test_r1', 'train_r2', 'dev_r2', 'test_r2', 'train_r3', 'dev_r3', 'test_r3']
        elif self.task_identifier == "multi_nli":
            # NOTE(review): only the validation splits are exported for
            # multi_nli (no train split) — confirm that is deliberate.
            splits = ['validation_matched', 'validation_mismatched']
        all_lines = self.get_all_lines(dataset, splits)
        # shuffle the data
        # np.random.seed(seed)
        # np.random.shuffle(train_lines)
        os.makedirs(os.path.join(path, self.task_identifier), exist_ok=True)
        prefix = os.path.join(path, self.task_identifier,
                              "{}".format(self.task_identifier))
        for split in splits:
            write_to_jsonl(all_lines[split], f"{prefix}_{split}.jsonl")

    def map_to_list(self, dataset, split_name):
        # Convert one split into serialized {"id", "input", "output"} lines,
        # skipping examples with unmapped labels (e.g. -1 in anli/mnli).
        lines = []
        for datapoint in dataset[split_name]:
            # print(datapoint["label"])
            if datapoint["label"] not in self.label:
                continue
            _id = f"{self.task_identifier}-{split_name}-{len(lines)}"
            if self.task_identifier == "qnli":
                # qnli pairs a sentence with a question instead of premise/hypothesis.
                _input = f'Premise: {datapoint["sentence"]} </s> Hypothesis: {datapoint["question"]}'
            else:
                _input = f'Premise: {datapoint["premise"]} </s> Hypothesis: {datapoint["hypothesis"]}'
            # NOTE(review): the options string always lists all three labels,
            # even for two-label tasks (qnli/scitail) — confirm intended.
            _input += " | Options: entailment, neutral, contradiction "
            _output = self.label[datapoint["label"]]
            instance = {"id": _id, "input": _input, "output": _output}
            lines.append(json.dumps(instance))
        print("Three examples: \n" + "\n".join([str(_) for _ in lines[:3]]))
        return lines

    def load_dataset(self):
        # scitail/qnli need an explicit config name; the rest load directly.
        if self.task_identifier == "scitail":
            return datasets.load_dataset("scitail", "dgem_format")
        elif self.task_identifier == "qnli":
            return datasets.load_dataset("glue", "qnli")
        else:
            return datasets.load_dataset(self.task_identifier)
class CSR(TextToTextDataset):
    """Text-to-text formatter for multiple-choice commonsense-reasoning datasets.

    Supported task identifiers include "commonsense_qa", "riddle_sense",
    "ai2_arc-easy", "ai2_arc-hard", and "openbookqa".
    """

    def __init__(self, task_identifier="commonsense_qa"):
        self.task_identifier = task_identifier

    def get_all_lines(self, dataset, splits=["train", "validation", "test"]):
        """Map each requested split to a de-duplicated list of JSON lines."""
        all_lines = {}
        for split in splits:
            all_lines[split] = deduplicate(self.map_to_list(dataset, split))
            show_statistics(all_lines[split])
        return all_lines

    def write_dataset(self, path):
        """
        return train, dev, test
        """
        # load dataset
        dataset = self.load_dataset()
        # only train/validation are written for these datasets
        splits = ["train", "validation"]
        all_lines = self.get_all_lines(dataset, splits)
        os.makedirs(os.path.join(path, self.task_identifier), exist_ok=True)
        prefix = os.path.join(path, self.task_identifier,
                              "{}".format(self.task_identifier))
        for split in splits:
            write_to_jsonl(all_lines[split], f"{prefix}_{split}.jsonl")

    def map_to_list(self, dataset, split_name):
        """Convert one split into JSON strings with id/input/output fields."""
        lines = []
        for datapoint in dataset[split_name]:
            choices = datapoint["choices"]
            choices_map = {}
            choice_strs = []
            for ind, (key, choice) in enumerate(list(zip(choices["label"], choices["text"]))):
                if self.task_identifier == "openbookqa":
                    # normalize openbookqa's choice labels to A/B/C/...
                    key = list("ABCDEF")[ind]
                choices_map[key] = choice
                choice_strs.append(f"{key}: {choice}")
            _id = f"{self.task_identifier}-{split_name}-{len(lines)}"
            if self.task_identifier == "openbookqa":
                _input = f'Question: {datapoint["question_stem"]} </s> {" | ".join(choice_strs)}'
            else:
                _input = f'Question: {datapoint["question"]} </s> {" | ".join(choice_strs)}'
            # the target is the text of the gold choice, not its letter;
            # NOTE(review): a missing/empty answerKey would raise KeyError here.
            _output = [choices_map[datapoint["answerKey"]]]
            instance = {"id": _id, "input": _input, "output": _output}
            lines.append(json.dumps(instance))
        print("Three examples: \n" + "\n".join([str(_) for _ in lines[:3]]))
        return lines

    def load_dataset(self):
        """Load the HF dataset, mapping the ai2_arc-* aliases to their configs."""
        if self.task_identifier == "ai2_arc-easy":
            return datasets.load_dataset("ai2_arc", "ARC-Easy")
        elif self.task_identifier == "ai2_arc-hard":
            return datasets.load_dataset("ai2_arc", "ARC-Challenge")
        elif self.task_identifier == "openbookqa":
            return datasets.load_dataset("openbookqa", "main")
        return datasets.load_dataset(self.task_identifier)
def format(dataset_name, path="./"):
    """Instantiate the matching formatter for *dataset_name* and write it to *path*.

    Prefixes: "mrqa_<subset>", "nli#<task>", "csr#<task>". Unrecognized
    names are silently ignored (only the banner is printed).
    """
    print("Formatting ", dataset_name)
    formatter = None
    if dataset_name.startswith("mrqa_"):
        formatter = MRQA(subset=dataset_name.split("_")[1])
    elif dataset_name.startswith("nli#"):
        formatter = NLI(task_identifier=dataset_name.split("#")[1])
    elif dataset_name.startswith("csr#"):
        formatter = CSR(task_identifier=dataset_name.split("#")[1])
    if formatter is not None:
        formatter.write_dataset(path)
# Script driver: the output directory defaults to "data/" but can be
# overridden with the first command-line argument.
path = "data/"
if len(sys.argv) >= 2:
    path = sys.argv[1]
format("mrqa_SQuAD", path)
format("mrqa_TriviaQA", path)
format("mrqa_NaturalQuestions", path)
format("mrqa_HotpotQA", path)
format("mrqa_NewsQA", path)
format("mrqa_SearchQA", path)
# format("nli#snli", path)
# format("nli#anli", path)
# format("nli#multi_nli", path)
# format("nli#scitail", path)
# format("csr#commonsense_qa", path)
# format("csr#riddle_sense", path)
# format("csr#ai2_arc-easy", path)
# format("csr#ai2_arc-hard", path)
# format("csr#openbookqa", path)
|
CMR-main
|
data/data_formatter.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import distutils.command.clean
import os
import shutil
import subprocess
from pathlib import Path
from setuptools import find_packages, setup
# Resolve the package version: start from version.txt, then either take
# BUILD_VERSION verbatim or append the current git commit's short sha.
cwd = os.path.dirname(os.path.abspath(__file__))
version_txt = os.path.join(cwd, "version.txt")
with open(version_txt, "r") as f:
    version = f.readline().strip()
ROOT_DIR = Path(__file__).parent.resolve()
try:
    sha = (
        subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd)
        .decode("ascii")
        .strip()
    )
except Exception:
    # not a git checkout (e.g. building from an sdist) — leave unsuffixed
    sha = "Unknown"
package_name = "rlhive"
if os.getenv("BUILD_VERSION"):
    version = os.getenv("BUILD_VERSION")
elif sha != "Unknown":
    version += "+" + sha[:7]
def write_version_file():
    """Write rlhive/version.py containing the resolved version and git sha."""
    target = os.path.join(cwd, "rlhive", "version.py")
    contents = [
        f"__version__ = '{version}'\n",
        f"git_version = {sha!r}\n",
    ]
    with open(target, "w") as fh:
        fh.writelines(contents)
def _get_pytorch_version():
# if "PYTORCH_VERSION" in os.environ:
# return f"torch=={os.environ['PYTORCH_VERSION']}"
return "torch"
def _get_packages():
    """Discover the packages to ship, skipping build and test trees."""
    excluded_patterns = [
        "build*",
        "test*",
    ]
    return find_packages(exclude=excluded_patterns)
# NOTE(review): ROOT_DIR was already computed above — this re-assignment is redundant.
ROOT_DIR = Path(__file__).parent.resolve()


class clean(distutils.command.clean.clean):
    """`setup.py clean` that also removes built .so extensions and build dirs."""

    def run(self):
        # Run default behavior first
        distutils.command.clean.clean.run(self)
        # Remove rlhive extension
        for path in (ROOT_DIR / "rlhive").glob("**/*.so"):
            print(f"removing '{path}'")
            path.unlink()
        # Remove build directory
        build_dirs = [
            ROOT_DIR / "build",
        ]
        for path in build_dirs:
            if path.exists():
                print(f"removing '{path}' (and everything under it)")
                shutil.rmtree(str(path), ignore_errors=True)
def _check_robohive():
import importlib
import sys
name = "robohive"
spam_loader = importlib.find_loader(name)
found = spam_loader is not None
if name in sys.modules:
print(f"{name!r} already in sys.modules")
# elif (spec := importlib.util.find_spec(name)) is not None:
elif found:
print(f"{name!r} is importable")
else:
raise ImportError(
f"can't find {name!r}: check README.md for " f"install instructions"
)
def _main():
    """Assemble package metadata and dependencies, then call setuptools.setup().

    Reads the long description from README.md next to this file; the torch
    requirement comes from _get_pytorch_version().
    """
    pytorch_package_dep = _get_pytorch_version()
    print("-- PyTorch dependency:", pytorch_package_dep)
    this_directory = Path(__file__).parent
    long_description = (this_directory / "README.md").read_text()
    setup(
        # Metadata
        name="rlhive",
        version=version,
        author="rlhive contributors",
        author_email="[email protected]",
        url="https://github.com/fairinternal/rlhive",
        long_description=long_description,
        long_description_content_type="text/markdown",
        license="BSD",
        # Package info
        packages=find_packages(exclude=("test", "tutorials", "third_party")),
        install_requires=[
            pytorch_package_dep,
            "torchrl",
            "gym==0.13",
            "numpy",
            "packaging",
            "cloudpickle",
            "hydra-core",
            "dm_control",
        ],
        zip_safe=False,
        dependency_links=[
            # location to your egg file
        ],
        # fix: the keyword was misspelled as "extra_requires", which
        # setuptools silently ignores — the "tests" extra never worked.
        extras_require={
            "tests": ["pytest", "pyyaml", "pytest-instafail"],
        },
    )
if __name__ == "__main__":
    # Materialize rlhive/version.py before building so the wheel ships the
    # resolved version and git sha.
    write_version_file()
    print("Building wheel {}-{}".format(package_name, version))
    print(f"BUILD_VERSION is {os.getenv('BUILD_VERSION')}")
    # _check_robohive()
    _main()
|
agenthive-dev
|
setup.py
|
import robohive
import torchrl
from rlhive import RoboHiveEnv
|
agenthive-dev
|
test/smoke_test.py
|
import argparse
import pytest
import torch
from rlhive.rl_envs import RoboHiveEnv
from torchrl.envs import (
CatTensors,
EnvCreator,
ParallelEnv,
R3MTransform,
TransformedEnv,
)
from torchrl.envs.utils import check_env_specs
def test_state_env():
    """Placeholder for a state-only environment smoke test (not implemented)."""
def test_pixel_env():
    """Placeholder for a pixel-only environment smoke test (not implemented)."""
@pytest.mark.parametrize(
    "env_name",
    [
        "visual_franka_slide_random-v3",
        "visual_franka_slide_close-v3",
        "visual_franka_slide_open-v3",
        "visual_franka_micro_random-v3",
        "visual_franka_micro_close-v3",
        "visual_franka_micro_open-v3",
        "visual_kitchen_knob1_off-v3",
        "visual_kitchen_knob1_on-v3",
        "visual_kitchen_knob2_off-v3",
        "visual_kitchen_knob2_on-v3",
        "visual_kitchen_knob3_off-v3",
        "visual_kitchen_knob3_on-v3",
        "visual_kitchen_knob4_off-v3",
        "visual_kitchen_knob4_on-v3",
        "visual_kitchen_light_off-v3",
        "visual_kitchen_light_on-v3",
        "visual_kitchen_sdoor_close-v3",
        "visual_kitchen_sdoor_open-v3",
        "visual_kitchen_ldoor_close-v3",
        "visual_kitchen_ldoor_open-v3",
        "visual_kitchen_rdoor_close-v3",
        "visual_kitchen_rdoor_open-v3",
        "visual_kitchen_micro_close-v3",
        "visual_kitchen_micro_open-v3",
        "visual_kitchen_close-v3",
    ],
)
def test_mixed_env(env_name):
    """Mixed envs expose both a flat "observation" vector and "pixels"."""
    base_env = RoboHiveEnv(
        env_name,
    )
    assert base_env.from_pixels
    # concatenate every non-pixel observation entry into one "observation" key
    env = TransformedEnv(
        base_env,
        CatTensors(
            [key for key in base_env.observation_spec.keys() if "pixels" not in key],
            "observation",
        ),
    )
    # reset
    tensordict = env.reset()
    assert {"done", "observation", "pixels"} == set(tensordict.keys())
    # two cameras stacked along the leading pixel dimension
    assert tensordict["pixels"].shape[0] == 2
    # step
    env.rand_step(tensordict)
    assert {
        "reward",
        "done",
        "observation",
        "pixels",
        "action",
        ("next", "observation"),
        ("next", "pixels"),
        "next",
    } == set(tensordict.keys(True))
    # rollout
    tensordict = env.rollout(10)
    assert {
        "reward",
        "done",
        "observation",
        "pixels",
        "action",
        ("next", "observation"),
        ("next", "pixels"),
        "next",
    } == set(tensordict.keys(True))
    assert tensordict.shape == torch.Size([10])
    env.close()
@pytest.mark.parametrize(
    "env_name",
    [
        "visual_franka_slide_random-v3",
        "visual_franka_slide_close-v3",
        "visual_franka_slide_open-v3",
        "visual_franka_micro_random-v3",
        "visual_franka_micro_close-v3",
        "visual_franka_micro_open-v3",
        "visual_kitchen_knob1_off-v3",
        "visual_kitchen_knob1_on-v3",
        "visual_kitchen_knob2_off-v3",
        "visual_kitchen_knob2_on-v3",
        "visual_kitchen_knob3_off-v3",
        "visual_kitchen_knob3_on-v3",
        "visual_kitchen_knob4_off-v3",
        "visual_kitchen_knob4_on-v3",
        "visual_kitchen_light_off-v3",
        "visual_kitchen_light_on-v3",
        "visual_kitchen_sdoor_close-v3",
        "visual_kitchen_sdoor_open-v3",
        "visual_kitchen_ldoor_close-v3",
        "visual_kitchen_ldoor_open-v3",
        "visual_kitchen_rdoor_close-v3",
        "visual_kitchen_rdoor_open-v3",
        "visual_kitchen_micro_close-v3",
        "visual_kitchen_micro_open-v3",
        "visual_kitchen_close-v3",
    ],
)
def test_specs(env_name):
    """Env specs must validate both on the raw env and with CatTensors applied."""
    base_env = RoboHiveEnv(
        env_name,
    )
    check_env_specs(base_env)
    env = TransformedEnv(
        base_env,
        CatTensors(
            [key for key in base_env.observation_spec.keys() if "pixels" not in key],
            "observation",
        ),
    )
    check_env_specs(env)
@pytest.mark.parametrize(
    "env_name",
    [
        "visual_franka_slide_random-v3",
        "visual_franka_slide_close-v3",
        "visual_franka_slide_open-v3",
        "visual_franka_micro_random-v3",
        "visual_franka_micro_close-v3",
        "visual_franka_micro_open-v3",
        "visual_kitchen_knob1_off-v3",
        "visual_kitchen_knob1_on-v3",
        "visual_kitchen_knob2_off-v3",
        "visual_kitchen_knob2_on-v3",
        "visual_kitchen_knob3_off-v3",
        "visual_kitchen_knob3_on-v3",
        "visual_kitchen_knob4_off-v3",
        "visual_kitchen_knob4_on-v3",
        "visual_kitchen_light_off-v3",
        "visual_kitchen_light_on-v3",
        "visual_kitchen_sdoor_close-v3",
        "visual_kitchen_sdoor_open-v3",
        "visual_kitchen_ldoor_close-v3",
        "visual_kitchen_ldoor_open-v3",
        "visual_kitchen_rdoor_close-v3",
        "visual_kitchen_rdoor_open-v3",
        "visual_kitchen_micro_close-v3",
        "visual_kitchen_micro_open-v3",
        "visual_kitchen_close-v3",
    ],
)
def test_parallel(env_name):
    """The transformed env must reset and roll out under ParallelEnv workers."""

    def make_env():
        # each worker builds (and spec-checks) its own transformed env
        base_env = RoboHiveEnv(
            env_name,
        )
        check_env_specs(base_env)
        env = TransformedEnv(
            base_env,
            CatTensors(
                [
                    key
                    for key in base_env.observation_spec.keys()
                    if "pixels" not in key
                ],
                "observation",
            ),
        )
        return env

    env = ParallelEnv(3, make_env)
    env.reset()
    env.rollout(3)
@pytest.mark.parametrize("parallel", [False, True])
def test_env_render_native(parallel):
    """Reset/step/rollout key sets and rollout shapes, single vs ParallelEnv."""
    if not parallel:
        env = RoboHiveEnv(env_name="FrankaReachRandom_v2d-v0")
    else:
        env = ParallelEnv(3, lambda: RoboHiveEnv(env_name="FrankaReachRandom_v2d-v0"))
    td = env.reset()
    assert set(td.keys(True)) == {
        "done",
        "observation",
        "pixels",
    }
    td = env.rand_step(td)
    assert set(td.keys(True)) == {
        "done",
        "next",
        ("next", "pixels"),
        "pixels",
        "observation",
        ("next", "observation"),
        "reward",
        "action",
    }
    td = env.rollout(50)
    # parallel rollouts gain a leading worker dimension
    if not parallel:
        assert td.shape == torch.Size([50])
    else:
        assert td.shape == torch.Size([3, 50])
    assert set(td.keys(True)) == {
        "done",
        "next",
        ("next", "pixels"),
        "pixels",
        "observation",
        ("next", "observation"),
        "reward",
        "action",
    }
    env.close()
@pytest.mark.parametrize(
    "parallel,env_creator", [[True, True], [True, False], [False, True]]
)
def test_env_r3m_native(parallel, env_creator):
    """R3M-transformed env rolls out with/without ParallelEnv and EnvCreator."""
    if not parallel:
        base_env = RoboHiveEnv(env_name="FrankaReachRandom_v2d-v0")
    else:
        if env_creator:
            env_creator = EnvCreator(
                lambda: RoboHiveEnv(env_name="FrankaReachRandom_v2d-v0")
            )
        else:
            env_creator = lambda: RoboHiveEnv(env_name="FrankaReachRandom_v2d-v0")
        base_env = ParallelEnv(3, env_creator)
    # embed raw pixels with a frozen resnet18 R3M encoder
    env = TransformedEnv(
        base_env,
        R3MTransform(
            "resnet18",
            ["pixels"],
            ["pixels_embed"],
        ),
    )
    td = env.reset()
    _ = env.rand_step(td)
    td = env.rollout(50)
    if parallel:
        assert td.shape == torch.Size([3, 50])
    else:
        assert td.shape == torch.Size([50])
    env.close()
if __name__ == "__main__":
    # forward unknown CLI args straight to pytest
    args, unknown = argparse.ArgumentParser().parse_known_args()
    pytest.main([__file__, "--capture", "no", "--exitfirst"] + unknown)
|
agenthive-dev
|
test/test_envs.py
|
import torch
def get_available_devices():
    """Return the CPU device followed by every visible CUDA device."""
    available = [torch.device("cpu")]
    available.extend(
        torch.device(f"cuda:{idx}") for idx in range(torch.cuda.device_count())
    )
    return available
|
agenthive-dev
|
test/utils.py
|
import argparse
import pytest
import torch
from omegaconf import OmegaConf
from rlhive.sim_algos.helpers import EnvConfig
from rlhive.sim_algos.run import make_env_constructor
from utils import get_available_devices
@pytest.mark.parametrize("device", get_available_devices())
def test_make_r3menv(device):
    """Single and multi env constructors produce r3m-embedded observation vectors."""
    cfg = EnvConfig
    # hacky way of create a config that can be shared across processes
    cfg = OmegaConf.create(OmegaConf.to_yaml(cfg))
    cfg.env_name = "FrankaReachRandom_v2d-v0"
    cfg.r3m = "resnet50"
    cfg.collector_devices = str(device)
    cfg.norm_stats = False
    cfg.env_per_collector = 2
    cfg.pin_memory = False
    cfg.batch_transform = True
    single_env_constructor, multi_env_constructor = make_env_constructor(cfg)
    env = single_env_constructor()
    print(env)
    td = env.reset()
    assert {"done", "observation_vector"} == set(td.keys())
    td = env.rollout(10)
    assert {
        "action",
        "done",
        (
            "next",
            "observation_vector",
        ),
        "next",
        "observation_vector",
        "reward",
    } == set(td.keys())
    assert td.shape == torch.Size([10])
    # multi_env_constructor is already an env instance, not a factory
    env = multi_env_constructor
    print(env)
    td = env.reset()
    assert {"done", "observation_vector"} == set(td.keys())
    td = env.rollout(10)
    assert {
        "action",
        "done",
        (
            "next",
            "observation_vector",
        ),
        "next",
        "observation_vector",
        "reward",
    } == set(td.keys())
    # env_per_collector=2 gives a leading batch dimension of 2
    assert td.shape == torch.Size([2, 10])
    env.close()
if __name__ == "__main__":
    # forward unknown CLI args straight to pytest
    args, unknown = argparse.ArgumentParser().parse_known_args()
    pytest.main([__file__, "--capture", "no", "--exitfirst"] + unknown)
|
agenthive-dev
|
test/test_helpers.py
|
''' Use this script to comapare multiple results \n
Usage: python viz_resulyts.py -j expdir1_group0 expdir2_group0 -j expdir3_group1 expdir4_group1 -k "key1" "key2"...
'''
from vtils.plotting import simple_plot
import argparse
from scipy import signal
import pandas
import glob
def get_files(search_path, file_name):
    """Recursively collect files named *file_name* under *search_path*.

    Raises AssertionError when nothing matches the pattern.
    """
    if search_path.endswith('/'):
        search_path = search_path[:-1]
    pattern = search_path + "*/**/" + file_name
    matches = glob.glob(pattern, recursive=True)
    assert (len(matches) > 0), "No file found at: {}".format(pattern)
    return matches
# Another example, Python 3.5+
def get_files_p35(search_path, file_name):
from pathlib import Path
filenames = []
for path in Path(search_path).rglob(file_name):
filenames.append(path)
return filenames
def get_log(filename, format="csv"):
    """Load a csv/json log into a DataFrame; exit the program on read failure."""
    try:
        if format == "csv":
            frame = pandas.read_csv(filename)
        elif format == "json":
            frame = pandas.read_json(filename)
    except Exception:
        print("WARNING: Can't read %s." % filename)
        quit()
    return frame
def smooth_data(y, window_length=101, polyorder=3):
    """Savitzky-Golay smoothing of *y*.

    The window is capped at half the series length and forced odd; when the
    filter cannot be applied (too-short input, NaNs) the raw series is
    returned unchanged.
    """
    window_length = min(int(len(y) / 2), window_length)
    if window_length % 2 == 0:
        window_length += 1
    try:
        return signal.savgol_filter(y, window_length, polyorder)
    except Exception:
        return y
# MAIN =========================================================
def main():
    """Plot smoothed SAC learning curves for every job/env/run directory tree.

    Layout: each -j job dir contains env dirs, each env dir contains run
    dirs (seeds/variants) with one log file each. One subplot per
    (env, ykey) pair; the figure is saved next to the first job dir.
    """
    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-j', '--job', required=True, action='append', nargs='?', help='job group')
    parser.add_argument(
        '-lf', '--log_file', type=str, default="log.csv", help='name of log file (with extension)')
    parser.add_argument(
        '-cf', '--config_file', type=str, default="job_config.json", help='name of config file (with extension)')
    parser.add_argument(
        '-t', '--title', type=str, default=None, help='Title of the plot')
    parser.add_argument(
        '-l', '--label', action='append', nargs='?', help='job group label')
    parser.add_argument(
        '-s', '--smooth', type=int, default=21, help='window for smoothing')
    parser.add_argument(
        '-y', '--ykeys', nargs='+', default=["eval_score", 'norm_score'], help='yKeys to plot')
    parser.add_argument(
        '-x', '--xkey', default="total_num_samples", help='xKey to plot')
    parser.add_argument(
        '-i', '--index', type=int, default=-4, help='index in log filename to use as labels')
    args = parser.parse_args()

    # scan labels
    if args.label is not None:
        assert (len(args.job) == len(args.label)), "The number of labels has to be same as the number of jobs"
    else:
        args.label = [''] * len(args.job)

    # for all the algo jobs
    for ialgo, algo_dir in enumerate(args.job):
        print("algo> "+algo_dir)
        envs_dirs = glob.glob(algo_dir+"/*/")
        # for all envs inside the algo
        nenv = len(envs_dirs)
        for ienv, env_dir in enumerate(sorted(envs_dirs)):
            print("env>> "+env_dir)
            run_dirs = glob.glob(env_dir+"/*/")
            # all the seeds/ variations within the env
            for irun, run_dir in enumerate(sorted(run_dirs)):
                print("run> "+run_dir)
                # derive the subplot title from the env directory name
                title = run_dir.split('/')[3]
                title = title[:title.find('-v')]
                # only the first matching log file per run is plotted
                log_file = get_files(run_dir, args.log_file)
                log = get_log(filename=log_file[0], format="csv")
                # validate keys
                for key in [args.xkey]+args.ykeys:
                    assert key in log.keys(), "{} not present in available keys {}".format(key, log.keys())
                nykeys = len(args.ykeys)
                for iykey, ykey in enumerate(sorted(args.ykeys)):
                    # x axis is rescaled to millions of samples
                    simple_plot.plot(xdata=log[args.xkey]/1e6,
                                     ydata=smooth_data(log[ykey], args.smooth),
                                     legend='job_name',
                                     subplot_id=(nenv, nykeys, nykeys*ienv+iykey+1),
                                     xaxislabel=args.xkey+'(M)',
                                     plot_name=title,
                                     yaxislabel=ykey,
                                     fig_size=(4*nykeys, 4*nenv),
                                     fig_name='SAC performance'
                                     )
    simple_plot.save_plot(args.job[0]+'RS-SAC.pdf')
if __name__ == '__main__':
    main()
|
agenthive-dev
|
agents/utils/plot_all_sac.py
|
''' Use this script to comapare multiple results \n
Usage: python agents/NPG/plot_all_npg.py -j agents/v0.1/kitchen/NPG/outputs_kitchenJ5c_3.8/ -j agents/v0.1/kitchen/NPG/outputs_kitchenJ5d_3.9/ -j /Users/vikashplus/Projects/mj_envs/kitchen/outputs_kitchenJ8a/ -l 'v0.1(fixed_init)' -l 'v0.1(random_init)' -l 'v0.2(random_init)' -pt True
'''
from vtils.plotting import simple_plot
import argparse
from scipy import signal
import pandas
import glob
import numpy as np
import os
def get_files(search_path, file_name):
    """Recursively collect files named *file_name* under *search_path*.

    Raises AssertionError when nothing matches the pattern.
    """
    if search_path.endswith('/'):
        search_path = search_path[:-1]
    pattern = search_path + "*/**/" + file_name
    matches = glob.glob(pattern, recursive=True)
    assert (len(matches) > 0), "No file found at: {}".format(pattern)
    return matches
# Another example, Python 3.5+
def get_files_p35(search_path, file_name):
from pathlib import Path
filenames = []
for path in Path(search_path).rglob(file_name):
filenames.append(path)
return filenames
def get_log(filename, format="csv"):
    """Load a csv/json log into a DataFrame; exit the program on read failure."""
    try:
        if format == "csv":
            frame = pandas.read_csv(filename)
        elif format == "json":
            frame = pandas.read_json(filename)
    except Exception:
        print("WARNING: Can't read %s." % filename)
        quit()
    return frame
def smooth_data(y, window_length=101, polyorder=3):
    """Savitzky-Golay smoothing of *y*.

    The window is capped at half the series length and forced odd; when the
    filter cannot be applied (too-short input, NaNs) the raw series is
    returned unchanged.
    """
    window_length = min(int(len(y) / 2), window_length)
    if window_length % 2 == 0:
        window_length += 1
    try:
        return signal.savgol_filter(y, window_length, polyorder)
    except Exception:
        return y
# MAIN =========================================================
def main():
    """Plot NPG training curves and final-performance bars across jobs/envs.

    Each -j job dir contains one dir per env, each env dir contains run
    dirs (seeds). Per env the runs are aggregated (mean/min/max/std over
    the run index); optionally the training curves are drawn (-pt), and a
    bar chart of final performance is always produced.
    """
    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-j', '--job', required=True, action='append', nargs='?', help='job group')
    parser.add_argument(
        '-lf', '--run_log', type=str, default="log.csv", help='name of log file (with extension)')
    parser.add_argument(
        '-cf', '--config_file', type=str, default="job_config.json", help='name of config file (with extension)')
    parser.add_argument(
        '-t', '--title', type=str, default=None, help='Title of the plot')
    parser.add_argument(
        '-l', '--label', action='append', nargs='?', help='job group label')
    parser.add_argument(
        '-s', '--smooth', type=int, default=21, help='window for smoothing')
    parser.add_argument(
        '-y', '--ykeys', nargs='+', default=['success_percentage', 'rwd_sparse', 'rwd_dense'], help='yKeys to plot')
    parser.add_argument(
        '-x', '--xkey', default="num_samples", help='xKey to plot')
    parser.add_argument(
        '-ei', '--env_index', type=int, default=-2, help='index in log filename to use as labels')
    parser.add_argument(
        '-pt', '--plot_train', type=bool, default=False, help='plot train perf')
    parser.add_argument(
        '-od', '--output_dir', type=str, default=None, help='Save outputs here')
    args = parser.parse_args()

    # init
    nykeys = len(args.ykeys)
    njob = len(args.job)
    nenv = -1
    env_labels = []

    # scan labels
    if args.label is not None:
        assert (njob == len(args.label)), "The number of labels has to be same as the number of jobs"
    else:
        args.label = [''] * njob

    # for all the jobs
    for ijob, job_dir in enumerate(args.job):
        print("Job> "+job_dir)
        envs_dirs = glob.glob(job_dir+"/*/")
        # every job must contain the same number of envs
        if nenv ==-1:
            nenv = len(envs_dirs)
        else:
            assert nenv == len(envs_dirs), f"Number of envs changed {envs_dirs}"
        for env_dir in sorted(envs_dirs):
            env_labels.append(env_dir.split('/')[args.env_index])

        # for all envs inside the exp
        env_means = []
        env_stds = []
        for ienv, env_dir in enumerate(sorted(envs_dirs)):
            print("  env> "+env_dir)
            # all the seeds/ variations runs within the env
            yruns = []
            xruns = []  # known bug: Logs will different lengths will cause a bug. Its hacked via using [:len(xdata)]
            for irun, run_log in enumerate(sorted(get_files(env_dir, args.run_log))):
                print("    run> "+run_log, flush=True)
                log = get_log(filename=run_log, format="csv")
                # validate keys
                for key in [args.xkey]+args.ykeys:
                    assert key in log.keys(), "{} not present in available keys {}".format(key, log.keys())
                if 'sample' in args.xkey:  # special keys: cumulate and show in millions
                    xdata = np.cumsum(log[args.xkey])/1e6
                    plot_xkey = args.xkey+"(M)"
                else:
                    xdata = log[args.xkey]
                    plot_xkey = args.xkey
                yruns.append(log[args.ykeys])
                del log

            # stats over keys: group by row index so runs are averaged per step
            yruns = pandas.concat(yruns)
            yruns_stacked = yruns.groupby(yruns.index)
            yruns_mean = yruns_stacked.mean()
            yruns_min = yruns_stacked.min()
            yruns_max = yruns_stacked.max()
            yruns_std = yruns_stacked.std()

            # stats over jobs: keep only the final row for the bar chart
            env_means.append(yruns_mean.tail(1))
            env_stds.append(yruns_std.tail(1))

            if args.plot_train:
                for iykey, ykey in enumerate(sorted(args.ykeys)):
                    h_figp,_,_= simple_plot.plot(xdata=xdata,
                                    ydata=smooth_data(yruns_mean[ykey][:len(xdata)], args.smooth),
                                    errmin=yruns_min[ykey][:len(xdata)],
                                    errmax=yruns_max[ykey][:len(xdata)],
                                    legend=args.label[ijob],
                                    subplot_id=(nenv, nykeys, nykeys*ienv+iykey+1),
                                    xaxislabel=plot_xkey,
                                    plot_name=env_labels[ienv],
                                    yaxislabel=ykey,
                                    fig_size=(4*nykeys, 3*nenv),
                                    fig_name='NPG performance',
                                    )

        env_means = pandas.concat(env_means)
        env_stds = pandas.concat(env_stds)
        # offset each job's bars so groups don't overlap
        width = 1/(njob+1)
        for iykey, ykey in enumerate(sorted(args.ykeys)):
            h_figb, h_axisb, h_bar = simple_plot.bar(
                xdata=np.arange(nenv)+width*ijob,
                ydata=env_means[ykey],
                errdata=env_stds[ykey],
                width=width,
                subplot_id=(nykeys, 1, iykey+1),
                fig_size=(2+0.2*nenv, 4*nykeys),
                fig_name="Env perfs",
                yaxislabel=ykey,
                legend=args.label[ijob],
                xticklabels=env_labels[:nenv],
                )

    args.output_dir = args.job[-1] if args.output_dir == None else args.output_dir
    if args.plot_train:
        simple_plot.save_plot(os.path.join(args.output_dir, 'TrainPerf-NPG.pdf'), h_figp)
    simple_plot.save_plot(os.path.join(args.output_dir,'FinalPerf-NPG.pdf'), h_figb)
if __name__ == '__main__':
    main()
|
agenthive-dev
|
agents/utils/plot_all_npg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from tensordict.tensordict import make_tensordict, TensorDictBase
from torchrl.data import BoundedTensorSpec, CompositeSpec, UnboundedContinuousTensorSpec
from torchrl.envs.libs.gym import _gym_to_torchrl_spec_transform, _has_gym, GymEnv
from torchrl.envs.transforms import CatTensors, Compose, R3MTransform, TransformedEnv
from torchrl.envs.utils import make_composite_from_td
from torchrl.trainers.helpers.envs import LIBS
if _has_gym:
import gym
class RoboHiveEnv(GymEnv):
    """TorchRL wrapper around RoboHive gym environments.

    Adds multi-camera pixel observations, observation-vector flattening and
    info-dict reading on top of torchrl's GymEnv.
    """
    # info_keys = ["time", "rwd_dense", "rwd_sparse", "solved"]

    def _build_env(
        self,
        env_name: str,
        from_pixels: bool = False,
        pixels_only: bool = False,
        **kwargs,
    ) -> "gym.core.Env":
        """Instantiate the underlying RoboHive gym env."""
        self.pixels_only = pixels_only

        # derive the mujoco render device id from the torch device index
        try:
            render_device = int(str(self.device)[-1])
        except ValueError:
            render_device = 0
        print(f"rendering device: {render_device}, device is {self.device}")

        if not _has_gym:
            raise RuntimeError(
                f"gym not found, unable to create {env_name}. "
                f"Consider downloading and installing dm_control from"
                f" {self.git_url}"
            )
        try:
            env = self.lib.make(
                env_name,
                frameskip=self.frame_skip,
                device_id=render_device,
                return_dict=True,
                **kwargs,
            )
            self.wrapper_frame_skip = 1
            # pixels are used whenever the env declares visual keys
            from_pixels = bool(len(env.visual_keys))
        except TypeError as err:
            if "unexpected keyword argument 'frameskip" not in str(err):
                raise TypeError(err)
            # fix: previously this popped the misspelled key "framek_skip",
            # which raised KeyError (the key is never present) and broke the
            # no-frameskip fallback path entirely.
            kwargs.pop("frameskip", None)
            env = self.lib.make(
                env_name, return_dict=True, device_id=render_device, **kwargs
            )
            self.wrapper_frame_skip = self.frame_skip
        self.from_pixels = from_pixels
        self.render_device = render_device
        self.info_dict_reader = self.read_info
        return env

    def _make_specs(self, env: "gym.Env") -> None:
        """Build action/observation/reward specs, adding a pixel spec if needed."""
        if self.from_pixels:
            num_cams = len(env.visual_keys)

        self.action_spec = _gym_to_torchrl_spec_transform(
            env.action_space, device=self.device
        )
        observation_spec = _gym_to_torchrl_spec_transform(
            env.observation_space,
            device=self.device,
        )
        if not isinstance(observation_spec, CompositeSpec):
            observation_spec = CompositeSpec(observation=observation_spec)
        self.observation_spec = observation_spec
        if self.from_pixels:
            # one uint8 frame of 224x224x3 per camera, stacked on dim 0
            self.observation_spec["pixels"] = BoundedTensorSpec(
                torch.zeros(
                    num_cams,
                    224,  # working with 640
                    224,  # working with 480
                    3,
                    device=self.device,
                    dtype=torch.uint8,
                ),
                255
                * torch.ones(
                    num_cams,
                    224,
                    224,
                    3,
                    device=self.device,
                    dtype=torch.uint8,
                ),
                torch.Size(torch.Size([num_cams, 224, 224, 3])),
                dtype=torch.uint8,
                device=self.device,
            )

        self.reward_spec = UnboundedContinuousTensorSpec(
            device=self.device,
        )  # default

        # probe the env once so the spec matches the actual rollout contents
        rollout = self.rollout(2).get("next").exclude("done", "reward")[0]
        self.observation_spec.update(make_composite_from_td(rollout))

    def set_from_pixels(self, from_pixels: bool) -> None:
        """Sets the from_pixels attribute to an existing environment.

        Args:
            from_pixels (bool): new value for the from_pixels attribute
        """
        if from_pixels is self.from_pixels:
            return
        self.from_pixels = from_pixels
        self._make_specs(self.env)

    def read_obs(self, observation):
        """Assemble the obs vector (and stacked pixels) from the env's obs dict."""
        # the info is missing from the reset
        observations = self.env.obs_dict
        visual = self.env.get_exteroception()
        try:
            del observations["t"]
        except KeyError:
            pass
        # recover vec
        obsvec = []
        pixel_list = []
        observations.update(visual)
        for key in observations:
            if key.startswith("rgb"):
                pix = observations[key]
                if not pix.shape[0] == 1:
                    pix = pix[None]
                pixel_list.append(pix)
            elif key in self._env.obs_keys:
                value = observations[key]
                if not value.shape:
                    # scalars get a leading axis so concatenate works
                    value = value[None]
                obsvec.append(value)  # ravel helps with images
        if obsvec:
            obsvec = np.concatenate(obsvec, 0)
        if self.from_pixels:
            out = {"observation": obsvec, "pixels": np.concatenate(pixel_list, 0)}
        else:
            out = {"observation": obsvec}
        return super().read_obs(out)

    def read_info(self, info, tensordict_out):
        """Copy info entries (minus obs_dict/done/reward) into the tensordict."""
        out = {}
        for key, value in info.items():
            if key in ("obs_dict", "done", "reward"):
                continue
            if isinstance(value, dict):
                # drop None entries, which make_tensordict cannot represent
                value = {key: _val for key, _val in value.items() if _val is not None}
                value = make_tensordict(value, batch_size=[])
            out[key] = value
        tensordict_out.update(out)
        return tensordict_out

    def to(self, *args, **kwargs):
        """Move the env; rebuild it when the render device changes."""
        out = super().to(*args, **kwargs)
        try:
            render_device = int(str(out.device)[-1])
        except ValueError:
            render_device = 0
        if render_device != self.render_device:
            out._build_env(**self._constructor_kwargs)
        return out
def make_r3m_env(env_name, model_name="resnet50", download=True, **kwargs):
    """Build a pixel RoboHive env whose frames are R3M-embedded and
    concatenated with the state entries into "observation_vector"."""
    base_env = RoboHiveEnv(env_name, from_pixels=True, pixels_only=False)
    # NOTE: `k not in "pixels"` is a substring test on the string "pixels",
    # preserved as-is from the original implementation.
    vec_keys = [k for k in base_env.observation_spec.keys() if k not in "pixels"]
    r3m = R3MTransform(
        model_name,
        keys_in=["pixels"],
        keys_out=["pixel_r3m"],
        download=download,
        **kwargs,
    )
    concat = CatTensors(keys_in=["pixel_r3m", *vec_keys], out_key="observation_vector")
    return TransformedEnv(base_env, Compose(r3m, concat))
LIBS["robohive"] = RoboHiveEnv
|
agenthive-dev
|
rlhive/rl_envs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Custom env reg for RoboHive usage in TorchRL
# Pixel rendering will be queried by torchrl, so we don't include those keys in visual_obs_keys_wt
import os
import warnings
from pathlib import Path
import robohive.envs.multi_task.substeps1
from robohive.envs.env_variants import register_env_variant
# Re-export the kitchen visual observation keys/weights table for local use.
visual_obs_keys_wt = robohive.envs.multi_task.substeps1.visual_obs_keys_wt
class set_directory(object):
    """Sets the cwd within the context

    Args:
        path (Path): The path to the cwd
    """

    def __init__(self, path: Path):
        self.path = path
        # remember where we started so __exit__ can restore it
        self.origin = Path().absolute()

    def __enter__(self):
        os.chdir(self.path)

    def __exit__(self, *args, **kwargs):
        # always return to the directory captured at construction time
        os.chdir(self.origin)

    def __call__(self, fun):
        # decorator usage: run each call of `fun` inside a fresh context
        def wrapped(*args, **kwargs):
            with set_directory(Path(self.path)):
                return fun(*args, **kwargs)

        return wrapped
# Paths and entry points re-exported from the robohive kitchen substeps module.
CURR_DIR = robohive.envs.multi_task.substeps1.CURR_DIR
MODEL_PATH = robohive.envs.multi_task.substeps1.MODEL_PATH
CONFIG_PATH = robohive.envs.multi_task.substeps1.CONFIG_PATH
RANDOM_ENTRY_POINT = robohive.envs.multi_task.substeps1.RANDOM_ENTRY_POINT
FIXED_ENTRY_POINT = robohive.envs.multi_task.substeps1.FIXED_ENTRY_POINT
# Variants registered in this module use randomized initial configurations.
ENTRY_POINT = RANDOM_ENTRY_POINT

# Observation keys overridden when registering the visual env variants below:
# per-appliance site-error state keys plus the two camera streams.
override_keys = [
    "objs_jnt",
    "end_effector",
    "knob1_site_err",
    "knob2_site_err",
    "knob3_site_err",
    "knob4_site_err",
    "light_site_err",
    "slide_site_err",
    "leftdoor_site_err",
    "rightdoor_site_err",
    "microhandle_site_err",
    "kettle_site0_err",
    "rgb:right_cam:224x224:2d",
    "rgb:left_cam:224x224:2d",
]
@set_directory(CURR_DIR)
def register_kitchen_envs():
    """Register "visual_*" variants of the multi-task kitchen envs.

    Each variant observes robot/end-effector state plus two camera streams.
    Registration failures are downgraded to warnings so one bad env does not
    abort the whole pass.
    """
    print("RLHive:> Registering Kitchen Envs")
    env_list = [
        "kitchen_knob1_off-v3",
        "kitchen_knob1_on-v3",
        "kitchen_knob2_off-v3",
        "kitchen_knob2_on-v3",
        "kitchen_knob3_off-v3",
        "kitchen_knob3_on-v3",
        "kitchen_knob4_off-v3",
        "kitchen_knob4_on-v3",
        "kitchen_light_off-v3",
        "kitchen_light_on-v3",
        "kitchen_sdoor_close-v3",
        "kitchen_sdoor_open-v3",
        "kitchen_ldoor_close-v3",
        "kitchen_ldoor_open-v3",
        "kitchen_rdoor_close-v3",
        "kitchen_rdoor_open-v3",
        "kitchen_micro_close-v3",
        "kitchen_micro_open-v3",
        "FK1_RelaxFixed-v4",
        # "kitchen_close-v3",
    ]
    # weighted state keys kept in the observation vector
    obs_keys_wt = {
        "robot_jnt": 1.0,
        "end_effector": 1.0,
    }
    visual_obs_keys = {
        "rgb:right_cam:224x224:2d": 1.0,
        "rgb:left_cam:224x224:2d": 1.0,
    }
    for env in env_list:
        try:
            new_env_name = "visual_" + env
            # NOTE(review): here visual_keys is passed as a *list* of key
            # names, whereas register_franka_envs passes the weight dict
            # itself — confirm which form register_env_variant expects.
            register_env_variant(
                env,
                variants={"obs_keys_wt": obs_keys_wt, "visual_keys": list(visual_obs_keys.keys())},
                variant_id=new_env_name,
                override_keys=override_keys,
            )
        except AssertionError as err:
            # best-effort registration: warn and continue with the next env
            warnings.warn(
                f"Could not register {new_env_name}, the following error was raised: {err}"
            )
@set_directory(CURR_DIR)
def register_franka_envs():
    """Register "visual_*" variants of the Franka appliance envs.

    Mirrors :func:`register_kitchen_envs`: each variant observes
    robot/end-effector state plus two camera streams, and registration
    failures are downgraded to warnings.
    """
    print("RLHive:> Registering Franka Envs")
    env_list = [
        "franka_slide_random-v3",
        "franka_slide_close-v3",
        "franka_slide_open-v3",
        "franka_micro_random-v3",
        "franka_micro_close-v3",
        "franka_micro_open-v3",
    ]
    # Franka Appliance ======================================================================
    obs_keys_wt = {
        "robot_jnt": 1.0,
        "end_effector": 1.0,
    }
    visual_obs_keys = {
        "rgb:right_cam:224x224:2d": 1.0,
        "rgb:left_cam:224x224:2d": 1.0,
    }
    for env in env_list:
        try:
            new_env_name = "visual_" + env
            register_env_variant(
                env,
                # CONSISTENCY FIX: pass the list of visual key names, as the
                # kitchen/hand/myo registrations in this module do, instead of
                # the {key: weight} dict that was passed here.
                variants={
                    "obs_keys_wt": obs_keys_wt,
                    "visual_keys": list(visual_obs_keys.keys()),
                },
                variant_id=new_env_name,
                override_keys=override_keys,
            )
        except AssertionError as err:
            # best-effort registration: warn and continue with the next env
            warnings.warn(
                f"Could not register {new_env_name}, the following error was raised: {err}"
            )
@set_directory(CURR_DIR)
def register_hand_envs():
    """Register "visual_*" variants of the Adroit hand manipulation envs."""
    print("RLHive:> Registering Arm Envs")
    camera_keys = [
        "rgb:vil_camera:224x224:2d",
        "rgb:fixed:224x224:2d",
    ]
    # Hand Manipulation Suite ======================================================================
    for base_name in ["door-v1", "hammer-v1", "pen-v1", "relocate-v1"]:
        visual_name = "visual_" + base_name
        try:
            register_env_variant(
                base_name,
                variants={"obs_keys": ["hand_jnt"], "visual_keys": camera_keys},
                variant_id=visual_name,
            )
        except AssertionError as err:
            # best-effort: a failed registration only emits a warning
            warnings.warn(
                f"Could not register {visual_name}, the following error was raised: {err}"
            )
@set_directory(CURR_DIR)
def register_myo_envs():
    """Register "visual_*" variants of the Myo suite envs."""
    print("RLHive:> Registering Myo Envs")
    camera_keys = [
        "rgb:vil_camera:224x224:2d",
        "rgb:fixed:224x224:2d",
    ]
    # Hand Manipulation Suite ======================================================================
    for base_name in ["motorFingerReachFixed-v0"]:
        visual_name = "visual_" + base_name
        try:
            register_env_variant(
                base_name,
                variants={"obs_keys": ["hand_jnt"], "visual_keys": camera_keys},
                variant_id=visual_name,
            )
        except AssertionError as err:
            # best-effort: a failed registration only emits a warning
            warnings.warn(
                f"Could not register {visual_name}, the following error was raised: {err}"
            )
|
agenthive-dev
|
rlhive/envs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .envs import (
    register_franka_envs,
    register_hand_envs,
    register_kitchen_envs,
    register_myo_envs,
)

# Register all visual env variants at package import time so they can be
# resolved by name afterwards.
register_franka_envs()
register_kitchen_envs()
register_hand_envs()
register_myo_envs()

# NOTE(review): imported after the registration calls above — presumably the
# ordering is intentional; confirm before moving this to the top of the file.
from .rl_envs import RoboHiveEnv
|
agenthive-dev
|
rlhive/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, Union
import torch
from torch.nn import Identity
from torchrl.data.tensor_specs import (
CompositeSpec,
TensorSpec,
UnboundedContinuousTensorSpec,
)
from torchrl.data.utils import DEVICE_TYPING
from torchrl.envs.transforms.transforms import (
CatTensors,
Compose,
FlattenObservation,
ObservationNorm,
Resize,
ToTensorImage,
Transform,
UnsqueezeTransform,
)
try:
from torchvision import models
_has_tv = True
except ImportError:
_has_tv = False
class _RRLNet(Transform):
    """Transform mapping image observations through a pretrained torchvision
    ResNet backbone (final fc layer replaced by Identity), producing a flat
    feature vector of size ``outdim``."""

    inplace = False

    def __init__(self, in_keys, out_keys, model_name, del_keys: bool = True):
        """
        Args:
            in_keys: entries read from the tensordict (image tensors).
            out_keys: entries written with the embeddings.
            model_name (str): one of "resnet18", "resnet34", "resnet50".
            del_keys (bool): if True, the input entries are removed from the
                tensordict after embedding.
        """
        if not _has_tv:
            raise ImportError(
                "Tried to instantiate RRL without torchvision. Make sure you have "
                "torchvision installed in your environment."
            )
        if model_name == "resnet18":
            self.model_name = "rrl_18"
            self.outdim = 512
            convnet = models.resnet18(pretrained=True)
        elif model_name == "resnet34":
            self.model_name = "rrl_34"
            self.outdim = 512
            convnet = models.resnet34(pretrained=True)
        elif model_name == "resnet50":
            self.model_name = "rrl_50"
            self.outdim = 2048
            convnet = models.resnet50(pretrained=True)
        else:
            raise NotImplementedError(
                f"model {model_name} is currently not supported by RRL"
            )
        # drop the classification head: we only want the feature vector
        convnet.fc = Identity()
        super().__init__(in_keys=in_keys, out_keys=out_keys)
        self.convnet = convnet
        self.del_keys = del_keys

    def _call(self, tensordict):
        # flatten the batch dims so the convnet sees a single batch axis
        tensordict_view = tensordict.view(-1)
        super()._call(tensordict_view)
        if self.del_keys:
            tensordict.exclude(*self.in_keys, inplace=True)
        return tensordict

    @torch.no_grad()
    def _apply_transform(self, obs: torch.Tensor) -> None:
        # collapse any leading batch dims beyond (C, H, W), embed, then restore
        shape = None
        if obs.ndimension() > 4:
            shape = obs.shape[:-3]
            obs = obs.flatten(0, -4)
        out = self.convnet(obs)
        if shape is not None:
            out = out.view(*shape, *out.shape[1:])
        return out

    def transform_observation_spec(self, observation_spec: TensorSpec) -> TensorSpec:
        """Replace the image specs with unbounded embedding specs of size outdim."""
        if not isinstance(observation_spec, CompositeSpec):
            raise ValueError("_RRLNet can only infer CompositeSpec")

        keys = [key for key in observation_spec._specs.keys() if key in self.in_keys]
        device = observation_spec[keys[0]].device
        dim = observation_spec[keys[0]].shape[:-3]

        observation_spec = CompositeSpec(observation_spec)
        if self.del_keys:
            for in_key in keys:
                del observation_spec[in_key]

        for out_key in self.out_keys:
            observation_spec[out_key] = UnboundedContinuousTensorSpec(
                shape=torch.Size([*dim, self.outdim]), device=device
            )

        return observation_spec

    # NOTE(review): dead weight-loading code kept for reference; the R3M
    # checkpoint URLs below do not apply to the ImageNet-pretrained backbone
    # used above — confirm before reviving.
    # @staticmethod
    # def _load_weights(model_name, r3m_instance, dir_prefix):
    #     if model_name not in ("r3m_50", "r3m_34", "r3m_18"):
    #         raise ValueError(
    #             "model_name should be one of 'r3m_50', 'r3m_34' or 'r3m_18'"
    #         )
    #     # url = "https://download.pytorch.org/models/rl/r3m/" + model_name
    #     url = "https://pytorch.s3.amazonaws.com/models/rl/r3m/" + model_name + ".pt"
    #     d = load_state_dict_from_url(
    #         url,
    #         progress=True,
    #         map_location=next(r3m_instance.parameters()).device,
    #         model_dir=dir_prefix,
    #     )
    #     td = TensorDict(d["r3m"], []).unflatten_keys(".")
    #     td_flatten = td["module"]["convnet"].flatten_keys(".")
    #     state_dict = td_flatten.to_dict()
    #     r3m_instance.convnet.load_state_dict(state_dict)

    # def load_weights(self, dir_prefix=None):
    #     self._load_weights(self.model_name, self, dir_prefix)
def _init_first(fun):
def new_fun(self, *args, **kwargs):
if not self.initialized:
self._init()
return fun(self, *args, **kwargs)
return new_fun
class RRLTransform(Compose):
    """RRL Transform class.

    RRL provides pre-trained ResNet weights aimed at facilitating visual
    embedding for robotic tasks. The models are trained using Ego4d.
    See the paper:
        Shah, Rutav, and Vikash Kumar. "RRl: Resnet as representation for reinforcement learning."
        arXiv preprint arXiv:2107.03380 (2021).
    The RRLTransform is created in a lazy manner: the object will be initialized
    only when an attribute (a spec or the forward method) will be queried.
    The reason for this is that the :obj:`_init()` method requires some attributes of
    the parent environment (if any) to be accessed: by making the class lazy we
    can ensure that the following code snippet works as expected:
    Examples:
        >>> transform = RRLTransform("resnet50", in_keys=["pixels"])
        >>> env.append_transform(transform)
        >>> # the forward method will first call _init which will look at env.observation_spec
        >>> env.reset()
    Args:
        model_name (str): one of resnet50, resnet34 or resnet18
        in_keys (list of str): list of input keys. If left empty, the
            "pixels" key is assumed.
        out_keys (list of str, optional): list of output keys. If left empty,
            "rrl_vec" is assumed.
        size (int, optional): Size of the image to feed to resnet.
            Defaults to 244.
        stack_images (bool, optional): if False, the images given in the :obj:`in_keys`
            argument will be treaded separetely and each will be given a single,
            separated entry in the output tensordict. Defaults to :obj:`True`.
        download (bool, optional): if True, the weights will be downloaded using
            the torch.hub download API (i.e. weights will be cached for future use).
            Defaults to False.
        download_path (str, optional): path where to download the models.
            Default is None (cache path determined by torch.hub utils).
        tensor_pixels_keys (list of str, optional): Optionally, one can keep the
            original images (as collected from the env) in the output tensordict.
            If no value is provided, this won't be collected.
    """

    @classmethod
    def __new__(cls, *args, **kwargs):
        # NOTE(review): these flags are assigned on the *class*, not the
        # instance, so constructing a new RRLTransform resets them for every
        # existing instance — confirm this is intended before instantiating
        # more than one.
        cls.initialized = False
        cls._device = None
        cls._dtype = None
        return super().__new__(cls)

    def __init__(
        self,
        model_name: str,
        in_keys: List[str],
        out_keys: List[str] = None,
        size: int = 244,
        stack_images: bool = True,
        download: bool = False,
        download_path: Optional[str] = None,
        tensor_pixels_keys: List[str] = None,
    ):
        super().__init__()
        self.in_keys = in_keys if in_keys is not None else ["pixels"]
        self.download = download
        self.download_path = download_path
        self.model_name = model_name
        self.out_keys = out_keys
        self.size = size
        self.stack_images = stack_images
        self.tensor_pixels_keys = tensor_pixels_keys
        # NOTE(review): the class docstring advertises lazy initialization,
        # but _init() is invoked eagerly here — confirm which is intended.
        self._init()

    def _init(self):
        """Initializer for RRL."""
        self.initialized = True
        in_keys = self.in_keys
        model_name = self.model_name
        out_keys = self.out_keys
        size = self.size
        stack_images = self.stack_images
        tensor_pixels_keys = self.tensor_pixels_keys

        # ToTensor
        transforms = []
        if tensor_pixels_keys:
            # keep a copy of the raw images under the requested keys
            for i in range(len(in_keys)):
                transforms.append(
                    CatTensors(
                        in_keys=[in_keys[i]],
                        out_key=tensor_pixels_keys[i],
                        del_keys=False,
                    )
                )

        totensor = ToTensorImage(
            unsqueeze=False,
            in_keys=in_keys,
        )
        transforms.append(totensor)

        # Normalize with the standard ImageNet statistics
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        normalize = ObservationNorm(
            in_keys=in_keys,
            loc=torch.tensor(mean).view(3, 1, 1),
            scale=torch.tensor(std).view(3, 1, 1),
            standard_normal=True,
        )
        transforms.append(normalize)

        # Resize: note that resize is a no-op if the tensor has the desired size already
        resize = Resize(size, size, in_keys=in_keys)
        transforms.append(resize)

        # RRL
        if out_keys is None:
            if stack_images:
                out_keys = ["rrl_vec"]
            else:
                out_keys = [f"rrl_vec_{i}" for i in range(len(in_keys))]
            self.out_keys = out_keys
        elif stack_images and len(out_keys) != 1:
            raise ValueError(
                f"out_key must be of length 1 if stack_images is True. Got out_keys={out_keys}"
            )
        elif not stack_images and len(out_keys) != len(in_keys):
            raise ValueError(
                "out_key must be of length equal to in_keys if stack_images is False."
            )

        if stack_images and len(in_keys) > 1:
            # stack all camera streams along a new dim and embed them in one pass
            unsqueeze = UnsqueezeTransform(
                in_keys=in_keys,
                out_keys=in_keys,
                unsqueeze_dim=-4,
            )
            transforms.append(unsqueeze)

            cattensors = CatTensors(
                in_keys,
                out_keys[0],
                dim=-4,
            )
            network = _RRLNet(
                in_keys=out_keys,
                out_keys=out_keys,
                model_name=model_name,
                del_keys=False,
            )
            flatten = FlattenObservation(-2, -1, out_keys)
            transforms = [*transforms, cattensors, network, flatten]
        else:
            network = _RRLNet(
                in_keys=in_keys,
                out_keys=out_keys,
                model_name=model_name,
                del_keys=True,
            )
            transforms = [*transforms, network]

        for transform in transforms:
            self.append(transform)
        # if self.download:
        #     self[-1].load_weights(dir_prefix=self.download_path)

        # re-apply any device/dtype requested before initialization
        if self._device is not None:
            self.to(self._device)
        if self._dtype is not None:
            self.to(self._dtype)

    def to(self, dest: Union[DEVICE_TYPING, torch.dtype]):
        # remember the destination so _init() can replay it
        if isinstance(dest, torch.dtype):
            self._dtype = dest
        else:
            self._device = dest
        return super().to(dest)

    @property
    def device(self):
        return self._device

    @property
    def dtype(self):
        return self._dtype
|
agenthive-dev
|
rlhive/sim_algos/helpers/rrl_transform.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Multi-node distributed data collection with submitit in contexts where jobs can't launch other jobs.
The default configuration will ask for 8 nodes with 1 GPU each and 32 procs / node.
It should reach a collection speed of roughly 15-25K fps, or better depending
on the cluster specs.
The logic of the script is the following: we create a `main()` function that
executes or code (in this case just a data collection but in practice a training
loop should be present).
Since this `main()` function cannot launch sub-jobs by design, we launch the script
from the jump host and pass the slurm specs to submitit.
*Note*:
Although we don't go in much details into this in this script, the specs of the training
node and the specs of the inference nodes can differ (look at the DEFAULT_SLURM_CONF
and DEFAULT_SLURM_CONF_MAIN dictionaries below).
"""
import time
from argparse import ArgumentParser
import torch
from torchrl.collectors.distributed import submitit_delayed_launcher
from torchrl.collectors.distributed.default_configs import (
DEFAULT_SLURM_CONF,
DEFAULT_SLURM_CONF_MAIN,
)
# ---- CLI arguments for the distributed collection benchmark ----
parser = ArgumentParser()
parser.add_argument("--partition", "-p", help="slurm partition to use")
parser.add_argument("--num_jobs", type=int, default=8, help="Number of jobs")
parser.add_argument("--tcp_port", type=int, default=1234, help="TCP port")
parser.add_argument(
    "--num_workers", type=int, default=8, help="Number of workers per node"
)
parser.add_argument(
    "--gpus_per_node",
    "--gpus-per-node",
    "-G",
    type=int,
    default=1,
    help="Number of GPUs per node. If greater than 0, the backend used will be NCCL.",
)
parser.add_argument(
    "--cpus_per_task",
    "--cpus-per-task",
    "-c",
    type=int,
    default=32,
    help="Number of CPUs per node.",
)
parser.add_argument(
    "--sync", action="store_true", help="Use --sync to collect data synchronously."
)
parser.add_argument(
    "--frames_per_batch",
    "--frames-per-batch",
    default=4000,
    type=int,
    help="Number of frames in each batch of data. Must be "
    "divisible by the product of nodes and workers if sync, by the number of "
    "workers otherwise.",
)
parser.add_argument(
    "--total_frames",
    "--total-frames",
    default=10_000_000,
    type=int,
    help="Total number of frames collected by the collector.",
)
parser.add_argument(
    "--time",
    "-t",
    default="1:00:00",
    help="Timeout for the nodes",
)
parser.add_argument(
    "--backend",
    "-b",
    default="gloo",
    help="Backend for the collector",
)
parser.add_argument("--env_name", default="franka_micro_random-v3")
parser.add_argument("--r3m", action="store_true")
args = parser.parse_args()

# ---- propagate CLI choices into the submitit slurm configurations ----
slurm_gpus_per_node = args.gpus_per_node
slurm_time = args.time
backend = args.backend
DEFAULT_SLURM_CONF["slurm_gpus_per_node"] = slurm_gpus_per_node
DEFAULT_SLURM_CONF["slurm_time"] = slurm_time
DEFAULT_SLURM_CONF["slurm_cpus_per_task"] = args.cpus_per_task
DEFAULT_SLURM_CONF["slurm_partition"] = args.partition
DEFAULT_SLURM_CONF_MAIN["slurm_partition"] = args.partition
DEFAULT_SLURM_CONF_MAIN["slurm_time"] = slurm_time

num_jobs = args.num_jobs
tcp_port = args.tcp_port
num_workers = args.num_workers
sync = args.sync
total_frames = args.total_frames
frames_per_batch = args.frames_per_batch
# gloo is a CPU backend; anything else is assumed to need CUDA tensors
device = "cpu" if backend == "gloo" else "cuda:0"
def make_env(args):
    """Return a picklable env constructor for the collector workers.

    NOTE(review): the RoboHive lines are commented out and a hard-coded
    "Pendulum-v0" GymEnv is built instead, so ``args.env_name`` only affects
    the optional R3M branch — this looks like leftover debugging state;
    confirm before relying on ``--env_name``.
    """

    def constructor():
        # imports live inside the closure so it can be shipped to workers
        from rlhive import RoboHiveEnv
        from torchrl.envs import EnvCreator, ParallelEnv, R3MTransform, TransformedEnv
        from torchrl.envs.libs.gym import GymEnv

        if args.num_workers > 1:
            penv = ParallelEnv(
                args.num_workers,
                # EnvCreator(lambda: RoboHiveEnv(args.env_name, device="cuda:0")),
                EnvCreator(lambda: GymEnv("Pendulum-v0", device="cuda:0")),
            )
        else:
            # penv = RoboHiveEnv(args.env_name, device="cuda:0")
            penv = GymEnv("Pendulum-v0", device="cuda:0")
        if "visual" in args.env_name:
            if args.r3m:
                # embed pixels with a pretrained R3M resnet50 backbone
                tenv = TransformedEnv(
                    penv,
                    R3MTransform(
                        in_keys=["pixels"], download=True, model_name="resnet50"
                    ),
                )
            else:
                tenv = penv
        else:
            tenv = penv
        return tenv

    return constructor
@submitit_delayed_launcher(
    num_jobs=num_jobs,
    backend=backend,
    tcpport=tcp_port,
)
def main():
    """Collect data with a DistributedDataCollector and report throughput.

    Runs on the node launched by submitit. Frames gathered before iteration
    10 are treated as warm-up and excluded from the fps estimate.
    """
    assert torch.cuda.device_count()
    import tqdm

    from torchrl.collectors import SyncDataCollector
    from torchrl.collectors.collectors import RandomPolicy
    from torchrl.collectors.distributed.generic import DistributedDataCollector
    from torchrl.envs import EnvCreator

    collector_class = SyncDataCollector
    collector = DistributedDataCollector(
        [EnvCreator(make_env(args))] * num_jobs,
        policy=RandomPolicy(make_env(args)().action_spec),
        launcher="submitit_delayed",
        frames_per_batch=frames_per_batch,
        total_frames=total_frames,
        tcp_port=tcp_port,
        collector_class=collector_class,
        num_workers_per_collector=args.num_workers,
        collector_kwargs={
            "device": "cuda:0" if slurm_gpus_per_node else "cpu",
            "storing_device": device,
        },
        storing_device="cpu",
        backend=backend,
        sync=sync,
    )
    counter = 0
    # BUG FIX: t0 was only assigned at iteration 10, so a short run (fewer
    # than 11 batches) raised NameError at the final print. Initialize it and
    # guard the report instead.
    t0 = None
    pbar = tqdm.tqdm(total=collector.total_frames)
    for i, data in enumerate(collector):
        pbar.update(data.numel())
        pbar.set_description(f"data shape: {data.shape}, data device: {data.device}")
        if i >= 10:
            counter += data.numel()
        if i == 10:
            t0 = time.time()
    t1 = time.time()
    if t0 is not None:
        print(f"time elapsed: {t1-t0}s, rate: {counter/(t1-t0)} fps")
    else:
        print("collection ended before warm-up completed; no fps estimate")
    collector.shutdown()
    exit()


if __name__ == "__main__":
    main()
|
agenthive-dev
|
examples/collection_speed_delayed.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from omegaconf import DictConfig
os.environ["sim_backend"] = "MUJOCO"
def main(args: DictConfig):
    """Train a REDQ agent on a RoboHive task described by the hydra config."""
    import numpy as np
    import torch.cuda
    import tqdm
    from rlhive.rl_envs import RoboHiveEnv
    from tensordict import TensorDict
    from torch import nn, optim
    from torchrl.collectors import MultiaSyncDataCollector
    from torchrl.data import TensorDictPrioritizedReplayBuffer, TensorDictReplayBuffer
    from torchrl.data.replay_buffers.storages import LazyMemmapStorage

    # from torchrl.envs import SerialEnv as ParallelEnv, R3MTransform, SelectTransform, TransformedEnv
    from torchrl.envs import (
        CatTensors,
        EnvCreator,
        ParallelEnv,
        R3MTransform,
        SelectTransform,
        TransformedEnv,
    )
    from torchrl.envs.transforms import Compose, FlattenObservation, RewardScaling
    from torchrl.envs.utils import set_exploration_mode, step_mdp
    from torchrl.modules import MLP, NormalParamWrapper, SafeModule
    from torchrl.modules.distributions import TanhNormal
    from torchrl.modules.tensordict_module.actors import (
        ProbabilisticActor,
        ValueOperator,
    )
    from torchrl.objectives import SoftUpdate
    from torchrl.objectives.deprecated import REDQLoss_deprecated as REDQLoss
    from torchrl.record import VideoRecorder
    from torchrl.record.loggers.wandb import WandbLogger
    from torchrl.trainers import Recorder

    # ===========================================================================================
    # Env constructor
    # ---------------
    # - Use the RoboHiveEnv class to wrap robohive envs in torchrl's GymWrapper
    # - Add transforms immediately after that:
    #   - SelectTransform: selects the relevant keys from our output
    #   - R3MTransform
    #   - FlattenObservation: The images delivered by robohive have a singleton dim to start with, we need to flatten that
    #   - RewardScaling
    #
    # One can also possibly use ObservationNorm.
    #
    # TIPS:
    # - For faster execution, you should follow this abstract scheme, where we reduce the data
    #   to be passed from worker to worker to a minimum, we apply R3M to a batch and append the
    #   rest of the transforms afterward:
    #
    # >>> env = TransformedEnv(
    # ...     ParallelEnv(N, lambda: TransformedEnv(RoboHiveEnv(...), SelectTransform(...))),
    # ...     Compose(
    # ...         R3MTransform(...),
    # ...         FlattenObservation(...),
    # ...         *other_transforms,
    # ...     ))
    #
def traj_is_solved(done, solved):
    """Fraction of trajectories in which `solved` is True at least once.

    Trajectories are delimited by the running count of `done` flags along
    dim -2: every unique value of the cumulative sum is one segment.
    """
    solved = solved.view_as(done)
    segments = done.cumsum(-2)
    n_solved = 0
    last_idx = 0
    for last_idx, seg_id in enumerate(segments.unique()):
        n_solved = n_solved + solved[segments == seg_id].any()
    return n_solved / (last_idx + 1)
def traj_total_reward(done, reward):
    """Average per-trajectory reward sum.

    Trajectories are delimited the same way as in `traj_is_solved`: by the
    unique values of the cumulative `done` count along dim -2.
    """
    reward = reward.view_as(done)
    segments = done.cumsum(-2)
    total = 0
    last_idx = 0
    for last_idx, seg_id in enumerate(segments.unique()):
        total = total + reward[segments == seg_id].sum()
    return total / (last_idx + 1)
def make_env(num_envs, task, visual_transform, reward_scaling, device):
    """Build a (possibly parallel) RoboHive env wrapped with the standard transforms."""
    if num_envs > 1:
        raw_env = ParallelEnv(
            num_envs, EnvCreator(lambda: RoboHiveEnv(task, device=device))
        )
    else:
        raw_env = RoboHiveEnv(task, device=device)
    return make_transformed_env(
        env=raw_env,
        reward_scaling=reward_scaling,
        visual_transform=visual_transform,
    )
def make_transformed_env(
    env,
    reward_scaling=5.0,
    visual_transform="r3m",
):
    """
    Apply transforms to the env (such as reward scaling and state normalization)
    """
    # keep only the step-output entries the training loop consumes
    env = TransformedEnv(
        env,
        SelectTransform(
            "solved", "pixels", "observation", "rwd_dense", "rwd_sparse"
        ),
    )
    if visual_transform == "r3m":
        vec_keys = ["r3m_vec"]
        selected_keys = ["observation", "r3m_vec"]
        env.append_transform(
            Compose(
                R3MTransform("resnet50", in_keys=["pixels"], download=True).eval(),
                FlattenObservation(-2, -1, in_keys=vec_keys),
            )
        )  # Necessary to Compose R3MTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
    elif visual_transform == "rrl":
        # NOTE(review): this branch is labelled "rrl" but still instantiates
        # R3MTransform (only with download="IMAGENET1K_V2") and reuses the
        # r3m_vec keys — confirm whether RRLTransform was intended here.
        vec_keys = ["r3m_vec"]
        selected_keys = ["observation", "r3m_vec"]
        env.append_transform(
            Compose(
                R3MTransform(
                    "resnet50", in_keys=["pixels"], download="IMAGENET1K_V2"
                ).eval(),
                FlattenObservation(-2, -1, in_keys=vec_keys),
            )
        )  # Necessary to Compose R3MTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
    elif not visual_transform:
        # state-only observations
        selected_keys = ["observation"]
    else:
        raise NotImplementedError(visual_transform)
    env.append_transform(RewardScaling(loc=0.0, scale=reward_scaling))
    # concatenate all selected entries into one flat vector for the MLPs
    out_key = "observation_vector"
    env.append_transform(CatTensors(in_keys=selected_keys, out_key=out_key))
    return env
# ===========================================================================================
# Making a recorder
# -----------------
#
# A `Recorder` is a dedicated torchrl class that will run the policy in the test env
# once every X steps (eg X=1M).
#
def make_recorder(
    task: str,
    frame_skip: int,
    record_interval: int,
    actor_model_explore: object,
    eval_traj: int,
    env_configs: dict,
    wandb_logger: WandbLogger,
    num_envs: int,
):
    """Build a Recorder that periodically evaluates the policy on a test env."""
    test_env = make_env(num_envs=num_envs, task=task, **env_configs)
    if "visual" in task:
        # for visual tasks, also log rendered frames to wandb
        video = VideoRecorder(wandb_logger, "test", in_keys=["pixels"])
        test_env.insert_transform(0, video)
    test_env.reset()
    return Recorder(
        record_frames=eval_traj * test_env.horizon,
        frame_skip=frame_skip,
        policy_exploration=actor_model_explore,
        recorder=test_env,
        exploration_mode="mean",
        record_interval=record_interval,
        log_keys=["reward", "solved", "done", "rwd_sparse"],
        out_keys={
            "reward": "r_evaluation",
            "solved": "success",
            "done": "done",
            "rwd_sparse": "rwd_sparse",
        },
    )
# ===========================================================================================
# Relplay buffers
# ---------------
#
# TorchRL also provides prioritized RBs if needed.
#
def make_replay_buffer(
    prb: bool,
    buffer_size: int,
    buffer_scratch_dir: str,
    device: torch.device,
    prefetch: int = 10,
):
    """Create a memmap-backed replay buffer, prioritized when `prb` is True."""
    storage = LazyMemmapStorage(
        buffer_size,
        scratch_dir=buffer_scratch_dir,
        device=device,
    )
    if prb:
        return TensorDictPrioritizedReplayBuffer(
            alpha=0.7,
            beta=0.5,
            pin_memory=False,
            prefetch=prefetch,
            storage=storage,
        )
    return TensorDictReplayBuffer(
        pin_memory=False,
        prefetch=prefetch,
        storage=storage,
    )
# ===========================================================================================
# Dataloader
# ----------
#
# This is a simplified version of the dataloder
#
@torch.no_grad()
@set_exploration_mode("random")
def dataloader(
    total_frames, fpb, train_env, actor, actor_collection, device_collection
):
    """Yield batches of `fpb` env steps until `total_frames` frames are collected.

    `actor` holds the trained weights; `actor_collection` is the copy used to
    act (possibly on another device), refreshed in-place from `actor` before
    each batch via the flattened parameter tensordicts built below.
    """
    params = TensorDict(
        {k: v for k, v in actor.named_parameters()}, batch_size=[]
    ).unflatten_keys(".")
    params_collection = TensorDict(
        {k: v for k, v in actor_collection.named_parameters()}, batch_size=[]
    ).unflatten_keys(".")
    _prev = None
    collected_frames = 0
    while collected_frames < total_frames:
        # sync the collection policy with the latest training weights (in-place)
        params_collection.update_(params)
        batch = TensorDict(
            {}, batch_size=[fpb, *train_env.batch_size], device=device_collection
        )
        for t in range(fpb):
            if _prev is None:
                _prev = train_env.reset()
            # flag done sub-envs so only those get reset
            _reset = _prev["_reset"] = _prev["done"].clone().squeeze(-1)
            if _reset.any():
                _prev = train_env.reset(_prev)
            _new = train_env.step(actor_collection(_prev))
            batch[t] = _new
            # keep "done" so the next iteration can detect finished sub-envs
            _prev = step_mdp(_new, exclude_done=False)
        collected_frames += batch.numel()
        yield batch
# customize device at will
device = args.device
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Create Environment
env_configs = {
"reward_scaling": args.reward_scaling,
"visual_transform": args.visual_transform,
"device": "cpu",
}
train_env = make_env(num_envs=args.env_per_collector, task=args.task, **env_configs)
# add forward pass for initialization with proof env
proof_env = make_env(num_envs=1, task=args.task, **env_configs)
# Create Agent
# Define Actor Network
in_keys = ["observation_vector"]
action_spec = proof_env.action_spec
actor_net_kwargs = {
"num_cells": [256, 256],
"out_features": 2 * action_spec.shape[-1],
"activation_class": nn.ReLU,
}
actor_net = MLP(**actor_net_kwargs)
dist_class = TanhNormal
dist_kwargs = {
"min": action_spec.space.minimum,
"max": action_spec.space.maximum,
"tanh_loc": True,
}
actor_net = NormalParamWrapper(
actor_net,
scale_mapping=f"biased_softplus_{1.0}",
scale_lb=0.1,
)
in_keys_actor = in_keys
actor_module = SafeModule(
actor_net,
in_keys=in_keys_actor,
out_keys=[
"loc",
"scale",
],
)
actor = ProbabilisticActor(
spec=action_spec,
in_keys=["loc", "scale"],
module=actor_module,
distribution_class=dist_class,
distribution_kwargs=dist_kwargs,
default_interaction_mode="random",
return_log_prob=True,
)
# Define Critic Network
qvalue_net_kwargs = {
"num_cells": [256, 256],
"out_features": 1,
"activation_class": nn.ReLU,
}
qvalue_net = MLP(
**qvalue_net_kwargs,
)
qvalue = ValueOperator(
in_keys=["action"] + in_keys,
module=qvalue_net,
)
model = actor, qvalue = nn.ModuleList([actor, qvalue]).to(device)
# init nets
with torch.no_grad(), set_exploration_mode("random"):
td = proof_env.reset()
td = td.to(device)
for net in model:
net(td)
del td
proof_env.close()
actor_model_explore = model[0]
# Create REDQ loss
loss_module = REDQLoss(
actor_network=model[0],
qvalue_network=model[1],
gamma=args.gamma,
loss_function="smooth_l1",
)
# Define Target Network Updater
target_net_updater = SoftUpdate(loss_module, args.target_update_polyak)
# Make Replay Buffer
replay_buffer = make_replay_buffer(
prb=args.prb,
buffer_size=args.buffer_size,
buffer_scratch_dir=args.buffer_scratch_dir,
device="cpu",
)
# Optimizers
params = list(loss_module.parameters())
optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
rewards = []
rewards_eval = []
# Main loop
target_net_updater.init_()
collected_frames = 0
episodes = 0
optim_steps = 0
pbar = tqdm.tqdm(total=args.total_frames)
r0 = None
loss = None
logger = WandbLogger(
exp_name=args.task,
project=args.wandb_project,
name=args.exp_name,
config=args,
entity=args.wandb_entity,
mode=args.wandb_mode,
)
# Trajectory recorder for evaluation
recorder = make_recorder(
task=args.task,
frame_skip=args.frame_skip,
record_interval=args.record_interval,
actor_model_explore=actor_model_explore,
eval_traj=args.eval_traj,
env_configs=env_configs,
wandb_logger=logger,
num_envs=args.num_record_envs,
)
collector_device = args.device_collection
if isinstance(collector_device, str):
collector_device = [collector_device]
collector = MultiaSyncDataCollector(
create_env_fn=[train_env for _ in collector_device],
policy=actor_model_explore,
total_frames=args.total_frames,
max_frames_per_traj=args.frames_per_batch,
frames_per_batch=args.frames_per_batch,
init_random_frames=args.init_random_frames,
reset_at_each_iter=False,
postproc=None,
split_trajs=False,
devices=collector_device, # device for execution
passing_devices=collector_device, # device where data will be stored and passed
seed=args.seed,
pin_memory=False,
update_at_each_batch=False,
exploration_mode="random",
)
for i, batch in enumerate(collector):
collector.update_policy_weights_()
if r0 is None:
r0 = batch["reward"].sum(-1).mean().item()
pbar.update(batch.numel())
# extend the replay buffer with the new data
batch = batch.cpu().view(-1)
current_frames = batch.numel()
collected_frames += current_frames
episodes += batch["done"].sum()
replay_buffer.extend(batch)
# optimization steps
if collected_frames >= args.init_random_frames:
(
total_losses,
actor_losses,
q_losses,
alpha_losses,
alphas,
entropies,
) = ([], [], [], [], [], [])
for _ in range(
max(1, args.frames_per_batch * args.utd_ratio // args.batch_size)
):
optim_steps += 1
# sample from replay buffer
sampled_tensordict = (
replay_buffer.sample(args.batch_size).clone().to(device)
)
loss_td = loss_module(sampled_tensordict)
actor_loss = loss_td["loss_actor"]
q_loss = loss_td["loss_qvalue"]
alpha_loss = loss_td["loss_alpha"]
loss = actor_loss + q_loss + alpha_loss
optimizer.zero_grad()
loss.backward()
gn = torch.nn.utils.clip_grad_norm_(params, args.clip_norm)
optimizer.step()
# update qnet_target params
target_net_updater.step()
# update priority
if args.prb:
replay_buffer.update_tensordict_priority(sampled_tensordict)
total_losses.append(loss.item())
actor_losses.append(actor_loss.item())
q_losses.append(q_loss.item())
alpha_losses.append(alpha_loss.item())
alphas.append(loss_td["alpha"].item())
entropies.append(loss_td["entropy"].item())
rewards.append((i, batch["reward"].mean().item()))
logger.log_scalar("train_reward", rewards[-1][1], step=collected_frames)
logger.log_scalar("optim_steps", optim_steps, step=collected_frames)
logger.log_scalar("episodes", episodes, step=collected_frames)
if loss is not None:
logger.log_scalar(
"total_loss", np.mean(total_losses), step=collected_frames
)
logger.log_scalar(
"actor_loss", np.mean(actor_losses), step=collected_frames
)
logger.log_scalar("q_loss", np.mean(q_losses), step=collected_frames)
logger.log_scalar(
"alpha_loss", np.mean(alpha_losses), step=collected_frames
)
logger.log_scalar("alpha", np.mean(alphas), step=collected_frames)
logger.log_scalar("entropy", np.mean(entropies), step=collected_frames)
logger.log_scalar("grad_norm", gn, step=collected_frames)
td_record = recorder(None)
if td_record is not None:
rewards_eval.append(
(
i,
td_record["r_evaluation"]
/ recorder.recorder.batch_size.numel(), # divide by number of eval worker
)
)
logger.log_scalar("test_reward", rewards_eval[-1][1], step=collected_frames)
solved = traj_is_solved(td_record["done"], td_record["success"])
logger.log_scalar("success", solved, step=collected_frames)
rwd_sparse = traj_total_reward(td_record["done"], td_record["rwd_sparse"])
logger.log_scalar("rwd_sparse", rwd_sparse, step=collected_frames)
if len(rewards_eval):
pbar.set_description(
f"reward: {rewards[-1][1]: 4.4f} (r0 = {r0: 4.4f}), test reward: {rewards_eval[-1][1]: 4.4f}, solved: {solved}"
)
del batch
# gc.collect()
if __name__ == "__main__":
main()
|
agenthive-dev
|
examples/redq.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from omegaconf import DictConfig
os.environ["sim_backend"] = "MUJOCO"  # simulation backend selector read by robohive
os.environ["MUJOCO_GL"] = "egl"  # headless (EGL) rendering backend for MuJoCo
def main(args: DictConfig):
    """Train SAC on a RoboHive task configured by the omegaconf `args`."""
    # NOTE(review): imports are deferred into main — presumably so the
    # sim_backend / MUJOCO_GL env vars above are set before the simulator
    # stack is imported; confirm before moving them to module level.
    import numpy as np
    import torch.cuda
    import tqdm
    from rlhive.rl_envs import RoboHiveEnv
    from sac_loss import SACLoss
    from tensordict import TensorDict
    from torch import nn, optim
    from torchrl.collectors import MultiaSyncDataCollector
    from torchrl.data import TensorDictPrioritizedReplayBuffer, TensorDictReplayBuffer
    from torchrl.data.replay_buffers.storages import LazyMemmapStorage
    # from torchrl.envs import SerialEnv as ParallelEnv, R3MTransform, SelectTransform, TransformedEnv
    from torchrl.envs import (
        CatTensors,
        EnvCreator,
        ParallelEnv,
        R3MTransform,
        SelectTransform,
        TransformedEnv,
    )
    from torchrl.envs.transforms import Compose, FlattenObservation, RewardScaling
    from torchrl.envs.utils import set_exploration_mode, step_mdp
    from torchrl.modules import MLP, NormalParamWrapper, SafeModule
    from torchrl.modules.distributions import TanhNormal
    from torchrl.modules.tensordict_module.actors import (
        ProbabilisticActor,
        ValueOperator,
    )
    from torchrl.objectives import SoftUpdate
    from torchrl.record import VideoRecorder
    from torchrl.record.loggers.wandb import WandbLogger
    from torchrl.trainers import Recorder
# ===========================================================================================
# Env constructor
# ---------------
# - Use the RoboHiveEnv class to wrap robohive envs in torchrl's GymWrapper
# - Add transforms immediately after that:
# - SelectTransform: selects the relevant keys from our output
# - R3MTransform
# - FlattenObservation: The images delivered by robohive have a singleton dim to start with, we need to flatten that
# - RewardScaling
#
# One can also possibly use ObservationNorm.
#
# TIPS:
# - For faster execution, you should follow this abstract scheme, where we reduce the data
# to be passed from worker to worker to a minimum, we apply R3M to a batch and append the
# rest of the transforms afterward:
#
# >>> env = TransformedEnv(
# ... ParallelEnv(N, lambda: TransformedEnv(RoboHiveEnv(...), SelectTransform(...))),
# ... Compose(
# ... R3MTransform(...),
# ... FlattenObservation(...),
# ... *other_transforms,
# ... ))
#
def traj_is_solved(done, solved):
    """Fraction of trajectory segments in which `solved` is ever True.

    `done.cumsum(-2)` labels each segment delimited by the done flags;
    a segment counts as solved when any of its steps is solved.
    """
    solved = solved.view_as(done)
    segment_ids = done.cumsum(-2)
    labels = segment_ids.unique()
    n_segments = 0
    solved_count = 0
    for label in labels:
        n_segments += 1
        solved_count = solved_count + solved[segment_ids == label].any()
    # max(..., 1) matches the original's 0/(0+1) behavior on empty input
    return solved_count / max(n_segments, 1)
def traj_total_reward(done, reward):
    """Average per-segment total reward, segments delimited by `done` flags."""
    reward = reward.view_as(done)
    segment_ids = done.cumsum(-2)
    labels = segment_ids.unique()
    total = 0
    for label in labels:
        total = total + reward[segment_ids == label].sum()
    # max(..., 1) matches the original's 0/(0+1) behavior on empty input
    return total / max(len(labels), 1)
def make_env(num_envs, task, visual_transform, reward_scaling, device):
    """Create a RoboHive env (parallelized when num_envs > 1) with the
    standard transform stack applied."""
    def _single():
        return RoboHiveEnv(task, device=device)

    if num_envs > 1:
        raw_env = ParallelEnv(num_envs, EnvCreator(_single))
    else:
        raw_env = _single()
    return make_transformed_env(
        env=raw_env,
        reward_scaling=reward_scaling,
        visual_transform=visual_transform,
    )
def make_transformed_env(
    env,
    reward_scaling=5.0,
    visual_transform="r3m",
):
    """
    Apply transforms to the env (such as reward scaling and state normalization).

    Args:
        env: base environment to wrap.
        reward_scaling: multiplicative factor applied to the reward.
        visual_transform: "r3m" or "rrl" (both pipe pixels through a ResNet50
            R3M encoder and differ only in which pretrained weights are
            downloaded), or a falsy value to skip visual processing entirely.

    Raises:
        NotImplementedError: for any other truthy `visual_transform` value.
    """
    env = TransformedEnv(
        env,
        SelectTransform(
            "solved", "pixels", "observation", "rwd_dense", "rwd_sparse"
        ),
    )
    # The original r3m/rrl branches were identical except for the `download`
    # argument; collapse them into one parameterized branch.
    download_flags = {"r3m": True, "rrl": "IMAGENET1K_V2"}
    if visual_transform in download_flags:
        vec_keys = ["r3m_vec"]
        selected_keys = ["observation", "r3m_vec"]
        env.append_transform(
            Compose(
                R3MTransform(
                    "resnet50",
                    in_keys=["pixels"],
                    download=download_flags[visual_transform],
                ).eval(),
                FlattenObservation(-2, -1, in_keys=vec_keys),
            )
        )  # Necessary to Compose R3MTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
    elif not visual_transform:
        selected_keys = ["observation"]
    else:
        raise NotImplementedError(visual_transform)
    env.append_transform(RewardScaling(loc=0.0, scale=reward_scaling))
    out_key = "observation_vector"
    env.append_transform(CatTensors(in_keys=selected_keys, out_key=out_key))
    return env
# ===========================================================================================
# Making a recorder
# -----------------
#
# A `Recorder` is a dedicated torchrl class that will run the policy in the test env
# once every X steps (eg X=1M).
#
def make_recorder(
    task: str,
    frame_skip: int,
    record_interval: int,
    actor_model_explore: object,
    eval_traj: int,
    env_configs: dict,
    wandb_logger: WandbLogger,
    num_envs: int,
):
    """Build a Recorder that rolls out the exploration policy (mean mode)
    in a dedicated evaluation env every `record_interval` steps."""
    eval_env = make_env(num_envs=num_envs, task=task, **env_configs)
    # Visual tasks additionally stream rendered frames to wandb.
    if "visual" in task:
        video = VideoRecorder(wandb_logger, "test", in_keys=["pixels"])
        eval_env.insert_transform(0, video)
    eval_env.reset()
    return Recorder(
        record_frames=eval_traj * eval_env.horizon,
        frame_skip=frame_skip,
        policy_exploration=actor_model_explore,
        recorder=eval_env,
        exploration_mode="mean",
        record_interval=record_interval,
        log_keys=["reward", "solved", "done", "rwd_sparse"],
        out_keys={
            "reward": "r_evaluation",
            "solved": "success",
            "done": "done",
            "rwd_sparse": "rwd_sparse",
        },
    )
# ===========================================================================================
# Replay buffers
# ---------------
#
# TorchRL also provides prioritized RBs if needed.
#
def make_replay_buffer(
    prb: bool,
    buffer_size: int,
    buffer_scratch_dir: str,
    device: torch.device,
    prefetch: int = 10,
):
    """Create a memmap-backed replay buffer.

    Args:
        prb: when True, build a prioritized buffer (alpha=0.7, beta=0.5).
        buffer_size: maximum number of stored transitions.
        buffer_scratch_dir: directory backing the memmap storage.
        device: device on which sampled data is returned.
        prefetch: number of batches to prefetch in the background.
    """
    # Both buffer flavors share the same memmap storage; build it once
    # instead of duplicating the construction in each branch.
    storage = LazyMemmapStorage(
        buffer_size,
        scratch_dir=buffer_scratch_dir,
        device=device,
    )
    if prb:
        return TensorDictPrioritizedReplayBuffer(
            alpha=0.7,
            beta=0.5,
            pin_memory=False,
            prefetch=prefetch,
            storage=storage,
        )
    return TensorDictReplayBuffer(
        pin_memory=False,
        prefetch=prefetch,
        storage=storage,
    )
# ===========================================================================================
# Dataloader
# ----------
#
# This is a simplified version of the dataloader
#
@torch.no_grad()
@set_exploration_mode("random")
def dataloader(
    total_frames, fpb, train_env, actor, actor_collection, device_collection
):
    """Simplified synchronous collector: yields TensorDict batches of `fpb` frames.

    `actor` carries the trained weights; `actor_collection` is the copy used
    to act in the env on `device_collection` and is refreshed from `actor`
    before every batch.
    """
    params = TensorDict(
        {k: v for k, v in actor.named_parameters()}, batch_size=[]
    ).unflatten_keys(".")
    params_collection = TensorDict(
        {k: v for k, v in actor_collection.named_parameters()}, batch_size=[]
    ).unflatten_keys(".")
    _prev = None
    collected_frames = 0
    while collected_frames < total_frames:
        # sync the collection policy with the latest training weights
        params_collection.update_(params)
        batch = TensorDict(
            {}, batch_size=[fpb, *train_env.batch_size], device=device_collection
        )
        for t in range(fpb):
            if _prev is None:
                _prev = train_env.reset()
            # flag envs that finished on the previous step for partial reset
            _reset = _prev["_reset"] = _prev["done"].clone().squeeze(-1)
            if _reset.any():
                _prev = train_env.reset(_prev)
            _new = train_env.step(actor_collection(_prev))
            batch[t] = _new
            _prev = step_mdp(_new, exclude_done=False)
        collected_frames += batch.numel()
        yield batch
# customize device at will
device = args.device
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Create Environment
env_configs = {
    "reward_scaling": args.reward_scaling,
    "visual_transform": args.visual_transform,
    "device": device,
}
train_env = make_env(num_envs=args.env_per_collector, task=args.task, **env_configs)
# add forward pass for initialization with proof env
proof_env = make_env(num_envs=1, task=args.task, **env_configs)
# Create Agent
# Define Actor Network: an MLP emitting loc/scale of a TanhNormal policy
in_keys = ["observation_vector"]
action_spec = proof_env.action_spec
actor_net_kwargs = {
    "num_cells": [256, 256],
    "out_features": 2 * action_spec.shape[-1],  # loc + scale per action dim
    "activation_class": nn.ReLU,
}
actor_net = MLP(**actor_net_kwargs)
dist_class = TanhNormal
dist_kwargs = {
    "min": action_spec.space.minimum,
    "max": action_spec.space.maximum,
    "tanh_loc": True,
}
actor_net = NormalParamWrapper(
    actor_net,
    scale_mapping=f"biased_softplus_{1.0}",
    scale_lb=0.1,
)
in_keys_actor = in_keys
actor_module = SafeModule(
    actor_net,
    in_keys=in_keys_actor,
    out_keys=[
        "loc",
        "scale",
    ],
)
actor = ProbabilisticActor(
    spec=action_spec,
    in_keys=["loc", "scale"],
    module=actor_module,
    distribution_class=dist_class,
    distribution_kwargs=dist_kwargs,
    default_interaction_mode="random",
    return_log_prob=True,
)
# Define Critic Network: Q(s, a) -> scalar
qvalue_net_kwargs = {
    "num_cells": [256, 256],
    "out_features": 1,
    "activation_class": nn.ReLU,
}
qvalue_net = MLP(
    **qvalue_net_kwargs,
)
qvalue = ValueOperator(
    in_keys=["action"] + in_keys,
    module=qvalue_net,
)
model = actor, qvalue = nn.ModuleList([actor, qvalue]).to(device)
# init nets: run one forward pass so lazy modules materialize their shapes
with torch.no_grad(), set_exploration_mode("random"):
    td = proof_env.reset()
    td = td.to(device)
    for net in model:
        net(td)
del td
proof_env.close()
actor_model_explore = model[0]
# Create SAC loss
loss_module = SACLoss(
    actor_network=model[0],
    qvalue_network=model[1],
    num_qvalue_nets=2,
    gamma=args.gamma,
    loss_function="smooth_l1",
)
# Define Target Network Updater (polyak averaging of the target Q nets)
target_net_updater = SoftUpdate(loss_module, args.target_update_polyak)
# Make Replay Buffer
replay_buffer = make_replay_buffer(
    prb=args.prb,
    buffer_size=args.buffer_size,
    buffer_scratch_dir=args.buffer_scratch_dir,
    device=args.device,
)
# Optimizers: a single Adam over all loss-module params (actor, Q, log_alpha)
params = list(loss_module.parameters())
optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
rewards = []
rewards_eval = []
# Main loop
target_net_updater.init_()
collected_frames = 0
episodes = 0
optim_steps = 0
pbar = tqdm.tqdm(total=args.total_frames)
r0 = None  # first-batch mean reward, kept as a baseline for the progress bar
loss = None
logger = WandbLogger(
    exp_name=args.task,
    project=args.wandb_project,
    name=args.exp_name,
    config=args,
    entity=args.wandb_entity,
    mode=args.wandb_mode,
)
# Trajectory recorder for evaluation
recorder = make_recorder(
    task=args.task,
    frame_skip=args.frame_skip,
    record_interval=args.record_interval,
    actor_model_explore=actor_model_explore,
    eval_traj=args.eval_traj,
    env_configs=env_configs,
    wandb_logger=logger,
    num_envs=args.num_record_envs,
)
collector_device = args.device_collection
if isinstance(collector_device, str):
    collector_device = [collector_device]
collector = MultiaSyncDataCollector(
    create_env_fn=[train_env for _ in collector_device],
    policy=actor_model_explore,
    total_frames=args.total_frames,
    max_frames_per_traj=args.frames_per_batch,
    frames_per_batch=args.frames_per_batch,
    init_random_frames=args.init_random_frames,
    reset_at_each_iter=False,
    postproc=None,
    split_trajs=False,
    devices=collector_device,
    # device for execution
    storing_devices=collector_device,
    # device where data will be stored and passed
    seed=args.seed,
    pin_memory=False,
    update_at_each_batch=False,
    exploration_mode="random",
)
for i, batch in enumerate(collector):
    collector.update_policy_weights_()
    if r0 is None:
        r0 = batch["reward"].sum(-1).mean().item()
    pbar.update(batch.numel())
    # extend the replay buffer with the new data
    batch = batch.cpu().view(-1)
    current_frames = batch.numel()
    collected_frames += current_frames
    episodes += batch["done"].sum()
    replay_buffer.extend(batch)
    # optimization steps (skipped during the initial random-exploration phase)
    if collected_frames >= args.init_random_frames:
        (
            total_losses,
            actor_losses,
            q_losses,
            alpha_losses,
            alphas,
            entropies,
        ) = ([], [], [], [], [], [])
        # number of gradient updates per collected batch (UTD ratio)
        for _ in range(
            max(1, args.frames_per_batch * args.utd_ratio // args.batch_size)
        ):
            optim_steps += 1
            # sample from replay buffer
            sampled_tensordict = (
                replay_buffer.sample(args.batch_size).clone().to(device)
            )
            loss_td = loss_module(sampled_tensordict)
            actor_loss = loss_td["loss_actor"]
            q_loss = loss_td["loss_qvalue"]
            alpha_loss = loss_td["loss_alpha"]
            loss = actor_loss + q_loss + alpha_loss
            optimizer.zero_grad()
            loss.backward()
            gn = torch.nn.utils.clip_grad_norm_(params, args.clip_norm)
            optimizer.step()
            # update qnet_target params
            target_net_updater.step()
            # update priority
            if args.prb:
                replay_buffer.update_tensordict_priority(sampled_tensordict)
            total_losses.append(loss.item())
            actor_losses.append(actor_loss.item())
            q_losses.append(q_loss.item())
            alpha_losses.append(alpha_loss.item())
            alphas.append(loss_td["alpha"].item())
            entropies.append(loss_td["entropy"].item())
    rewards.append((i, batch["reward"].mean().item()))
    logger.log_scalar("train_reward", rewards[-1][1], step=collected_frames)
    logger.log_scalar("optim_steps", optim_steps, step=collected_frames)
    logger.log_scalar("episodes", episodes, step=collected_frames)
    if loss is not None:
        logger.log_scalar(
            "total_loss", np.mean(total_losses), step=collected_frames
        )
        logger.log_scalar(
            "actor_loss", np.mean(actor_losses), step=collected_frames
        )
        logger.log_scalar("q_loss", np.mean(q_losses), step=collected_frames)
        logger.log_scalar(
            "alpha_loss", np.mean(alpha_losses), step=collected_frames
        )
        logger.log_scalar("alpha", np.mean(alphas), step=collected_frames)
        logger.log_scalar("entropy", np.mean(entropies), step=collected_frames)
        logger.log_scalar("grad_norm", gn, step=collected_frames)
    # periodic evaluation rollout (returns None between record intervals)
    td_record = recorder(None)
    if td_record is not None:
        rewards_eval.append(
            (
                i,
                td_record["r_evaluation"] / recorder.recorder.batch_size.numel(),
                # divide by number of eval worker
            )
        )
        logger.log_scalar("test_reward", rewards_eval[-1][1], step=collected_frames)
        solved = traj_is_solved(td_record["done"], td_record["success"])
        logger.log_scalar("success", solved, step=collected_frames)
        rwd_sparse = traj_total_reward(td_record["done"], td_record["rwd_sparse"])
        logger.log_scalar("rwd_sparse", rwd_sparse, step=collected_frames)
    if len(rewards_eval):
        pbar.set_description(
            f"reward: {rewards[-1][1]: 4.4f} (r0 = {r0: 4.4f}), test reward: {rewards_eval[-1][1]: 4.4f}, solved: {solved}"
        )
    del batch
    # gc.collect()
|
agenthive-dev
|
examples/sac.py
|
"""Entry point for RLHive"""
import hydra
from omegaconf import DictConfig
from redq import main as train_redq
from sac import main as train_sac
@hydra.main(config_name="sac_mixed.yaml", config_path="config")
def main(args: DictConfig):
    """Dispatch training to the algorithm selected by ``args.algo``."""
    # BUG FIX: the original used two independent `if` statements, so a run
    # with algo == "sac" trained SAC and then hit the `else` of the second
    # `if`, raising NotImplementedError. `elif` makes the dispatch exclusive.
    if args.algo == "sac":
        train_sac(args)
    elif args.algo == "redq":
        train_redq(args)
    else:
        raise NotImplementedError


if __name__ == "__main__":
    main()
|
agenthive-dev
|
examples/train.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
os.environ["sim_backend"] = "MUJOCO"  # simulation backend selector read by robohive
import argparse
import time
import tqdm
from rlhive.rl_envs import RoboHiveEnv
from torchrl.collectors.collectors import MultiaSyncDataCollector, RandomPolicy
from torchrl.collectors.distributed import DistributedDataCollector, RPCDataCollector
from torchrl.envs import EnvCreator, ParallelEnv, R3MTransform, TransformedEnv
# Command-line interface for the collection-speed benchmark.
parser = argparse.ArgumentParser()
_ARG_SPECS = (
    ("--num_workers", dict(default=2, type=int)),
    ("--num_collectors", dict(default=4, type=int)),
    ("--frames_per_batch", dict(default=200, type=int)),
    ("--total_frames", dict(default=20_000, type=int)),
    ("--r3m", dict(action="store_true")),
    ("--env_name", dict(default="franka_micro_random-v3")),
)
for _flag, _kwargs in _ARG_SPECS:
    parser.add_argument(_flag, **_kwargs)
if __name__ == "__main__":
    args = parser.parse_args()
    # Build the (optionally parallel) base env.
    if args.num_workers > 1:
        penv = ParallelEnv(
            args.num_workers,
            EnvCreator(lambda: RoboHiveEnv(args.env_name, device="cpu")),
        )
    else:
        penv = RoboHiveEnv(args.env_name, device="cpu")
    # Visual tasks may optionally pass pixels through a pretrained R3M encoder.
    if "visual" in args.env_name:
        if args.r3m:
            tenv = TransformedEnv(
                penv,
                R3MTransform(in_keys=["pixels"], download=True, model_name="resnet50"),
            )
        else:
            tenv = penv
    else:
        tenv = penv
    # tenv.transform[-1].init_stats(reduce_dim=(0, 1), cat_dim=1,
    #                               num_iter=1000)
    policy = RandomPolicy(tenv.action_spec)  # some random policy
    device = "cpu"
    slurm_conf = {
        "timeout_min": 100,
        "slurm_partition": "train",
        "slurm_cpus_per_gpu": 12,
        "slurm_gpus_per_task": 1,
    }
    collector = DistributedDataCollector(
        [tenv] * args.num_collectors,
        policy=policy,
        frames_per_batch=args.frames_per_batch,
        total_frames=args.total_frames,
        storing_device=device,
        split_trajs=False,
        sync=True,
        launcher="mp",
        slurm_kwargs=slurm_conf,
        backend="gloo",
    )
    pbar = tqdm.tqdm(total=args.total_frames)
    # The first 3 batches are treated as warm-up and excluded from timing.
    t0 = None
    total = 0
    for i, data in enumerate(collector):
        if i == 3:
            t0 = time.time()
            total = 0
        if i >= 3:
            total += data.numel()
        pbar.update(data.numel())
    if t0 is None:
        # BUG FIX: with fewer than 4 batches, t0 was previously unbound and
        # the final print raised NameError.
        print(f"{args.env_name}: fewer than 4 batches collected; no rate measured")
    else:
        t = time.time() - t0
        # BUG FIX: the rate previously divided args.total_frames (which
        # includes the untimed warm-up frames) by the timed interval; use the
        # frames actually accumulated after warm-up instead.
        print(f"{args.env_name}, Time: {t:4.4f}, Rate: {total / t: 4.4f} fps")
    del collector
    del tenv
|
agenthive-dev
|
examples/collection_speed.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from numbers import Number
from typing import Union
import numpy as np
import torch
from tensordict.nn import TensorDictSequential
from tensordict.tensordict import TensorDict, TensorDictBase
from torch import Tensor
from torchrl.envs.utils import set_exploration_mode, step_mdp
from torchrl.modules import SafeModule
from torchrl.objectives.common import LossModule
from torchrl.objectives.utils import (
distance_loss,
next_state_value as get_next_state_value,
)
# functorch is an optional dependency: record the import error so that
# SACLoss.__init__ can raise a helpful message lazily instead of failing
# at module import time.
try:
    from functorch import vmap
    FUNCTORCH_ERR = ""
    _has_functorch = True
except ImportError as err:
    FUNCTORCH_ERR = str(err)
    _has_functorch = False
class SACLoss(LossModule):
    """SAC Loss module.

    Args:
        actor_network (SafeModule): the actor to be trained
        qvalue_network (SafeModule): a single Q-value network that will be replicated as many times as needed.
        num_qvalue_nets (int, optional): Number of Q-value networks to be trained. Default is 2.
        gamma (Number, optional): gamma decay factor. Default is 0.99.
        priotity_key (str, optional): Key where to write the priority value for prioritized replay buffers. Default is
            `"td_error"`. (The parameter name is misspelled in the signature
            and kept as-is for backward compatibility.)
        loss_function (str, optional): loss function to be used for the Q-value. Can be one of `"smooth_l1"`, "l2",
            "l1", Default is "smooth_l1".
        alpha_init (float, optional): initial entropy multiplier.
            Default is 1.0.
        min_alpha (float, optional): min value of alpha.
            Default is 0.1.
        max_alpha (float, optional): max value of alpha.
            Default is 10.0.
        fixed_alpha (bool, optional): whether alpha should be trained to match a target entropy. Default is :obj:`False`.
        target_entropy (Union[str, Number], optional): Target entropy for the stochastic policy. Default is "auto".
        delay_qvalue (bool, optional): Whether to separate the target Q value networks from the Q value networks used
            for data collection. Default is :obj:`True`.
        gSDE (bool, optional): Knowing if gSDE is used is necessary to create random noise variables.
            Default is False
    """
    # No target network is kept for the actor in this implementation.
    delay_actor: bool = False
    # When True, forward() uses the slow, step-by-step loss computation
    # (useful for debugging / comparison against the vectorized path).
    _explicit: bool = False
def __init__(
    self,
    actor_network: SafeModule,
    qvalue_network: SafeModule,
    num_qvalue_nets: int = 2,
    gamma: Number = 0.99,
    priotity_key: str = "td_error",
    loss_function: str = "smooth_l1",
    alpha_init: float = 1.0,
    min_alpha: float = 0.1,
    max_alpha: float = 10.0,
    fixed_alpha: bool = False,
    target_entropy: Union[str, Number] = "auto",
    delay_qvalue: bool = True,
    gSDE: bool = False,
):
    # vmap (functorch) is required for the vectorized loss path.
    if not _has_functorch:
        raise ImportError(
            f"Failed to import functorch with error message:\n{FUNCTORCH_ERR}"
        )
    super().__init__()
    # Turn the actor into a functional module so its params can be swapped
    # (e.g. for target-network evaluation) without copying the module.
    self.convert_to_functional(
        actor_network,
        "actor_network",
        create_target_params=self.delay_actor,
        funs_to_decorate=["forward", "get_dist_params"],
    )
    # let's make sure that actor_network has `return_log_prob` set to True
    self.actor_network.return_log_prob = True
    self.delay_qvalue = delay_qvalue
    # Replicate the Q network num_qvalue_nets times (plus targets if delayed);
    # compare_against avoids duplicating params shared with the actor.
    self.convert_to_functional(
        qvalue_network,
        "qvalue_network",
        num_qvalue_nets,
        create_target_params=self.delay_qvalue,
        compare_against=list(actor_network.parameters()),
    )
    self.num_qvalue_nets = num_qvalue_nets
    self.register_buffer("gamma", torch.tensor(gamma))
    self.priority_key = priotity_key
    self.loss_function = loss_function
    try:
        device = next(self.parameters()).device
    except AttributeError:
        # parameter-less module: default to CPU
        device = torch.device("cpu")
    self.register_buffer("alpha_init", torch.tensor(alpha_init, device=device))
    # alpha is optimized in log space; the clamp bounds live in log space too.
    self.register_buffer(
        "min_log_alpha", torch.tensor(min_alpha, device=device).log()
    )
    self.register_buffer(
        "max_log_alpha", torch.tensor(max_alpha, device=device).log()
    )
    self.fixed_alpha = fixed_alpha
    if fixed_alpha:
        # buffer: alpha stays constant during training
        self.register_buffer(
            "log_alpha", torch.tensor(math.log(alpha_init), device=device)
        )
    else:
        # parameter: alpha is trained to match the target entropy
        self.register_parameter(
            "log_alpha",
            torch.nn.Parameter(torch.tensor(math.log(alpha_init), device=device)),
        )
    if target_entropy == "auto":
        if actor_network.spec["action"] is None:
            raise RuntimeError(
                "Cannot infer the dimensionality of the action. Consider providing "
                "the target entropy explicitely or provide the spec of the "
                "action tensor in the actor network."
            )
        # standard SAC heuristic: -dim(action)
        target_entropy = -float(np.prod(actor_network.spec["action"].shape))
    self.register_buffer(
        "target_entropy", torch.tensor(target_entropy, device=device)
    )
    self.gSDE = gSDE
@property
def alpha(self):
    """Current entropy temperature, exp(log_alpha), clamped to its bounds.

    The clamp mutates log_alpha in place (on .data, outside autograd) so the
    stored value itself stays inside [min_alpha, max_alpha].
    """
    self.log_alpha.data.clamp_(self.min_log_alpha, self.max_log_alpha)
    with torch.no_grad():
        alpha = self.log_alpha.exp()
    return alpha
def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
    """Compute the SAC losses for `tensordict`.

    Dispatches to the explicit (slow, readable) implementation when
    `self._explicit` is set, otherwise to the vectorized one.
    """
    compute = self._forward_explicit if self._explicit else self._forward_vectorized
    return compute(tensordict)
def _loss_alpha(self, log_pi: Tensor) -> Tensor:
    """Temperature loss: -alpha * (log_pi + target_entropy), detached log_pi."""
    if torch.is_grad_enabled() and not log_pi.requires_grad:
        raise RuntimeError(
            "expected log_pi to require gradient for the alpha loss)"
        )
    if self.target_entropy is None:
        # placeholder when no target entropy is configured
        return torch.zeros_like(log_pi)
    # we can compute this loss even if log_alpha is not a parameter
    return -self.log_alpha.exp() * (log_pi.detach() + self.target_entropy)
def _forward_vectorized(self, tensordict: TensorDictBase) -> TensorDictBase:
    """Vectorized SAC loss: actor, Q and alpha terms via stacked params + vmap."""
    obs_keys = self.actor_network.in_keys
    tensordict_select = tensordict.select(
        "reward", "done", "next", *obs_keys, "action"
    )
    # stack current and target actor params so one vmap call covers both
    actor_params = torch.stack(
        [self.actor_network_params, self.target_actor_network_params], 0
    )
    tensordict_actor_grad = tensordict_select.select(
        *obs_keys
    )  # to avoid overwriting keys
    next_td_actor = step_mdp(tensordict_select).select(
        *self.actor_network.in_keys
    )  # next_observation ->
    tensordict_actor = torch.stack([tensordict_actor_grad, next_td_actor], 0)
    tensordict_actor = tensordict_actor.contiguous()
    with set_exploration_mode("random"):
        if self.gSDE:
            tensordict_actor.set(
                "_eps_gSDE",
                torch.zeros(tensordict_actor.shape, device=tensordict_actor.device),
            )
        # vmap doesn't support sampling, so we take it out from the vmap
        td_params = vmap(self.actor_network.get_dist_params)(
            tensordict_actor,
            actor_params,
        )
        if isinstance(self.actor_network, TensorDictSequential):
            sample_key = self.actor_network[-1].out_keys[0]
            tensordict_actor_dist = self.actor_network.build_dist_from_params(
                td_params
            )
        else:
            sample_key = self.actor_network.out_keys[0]
            tensordict_actor_dist = self.actor_network.build_dist_from_params(
                td_params
            )
        # reparameterized sample + its log-prob for both current and next obs
        tensordict_actor[sample_key] = self._rsample(tensordict_actor_dist)
        tensordict_actor["sample_log_prob"] = tensordict_actor_dist.log_prob(
            tensordict_actor[sample_key]
        )
    # repeat tensordict_actor to match the qvalue size
    _actor_loss_td = (
        tensordict_actor[0]
        .select(*self.qvalue_network.in_keys)
        .expand(self.num_qvalue_nets, *tensordict_actor[0].batch_size)
    )  # for actor loss
    _qval_td = tensordict_select.select(*self.qvalue_network.in_keys).expand(
        self.num_qvalue_nets,
        *tensordict_select.select(*self.qvalue_network.in_keys).batch_size,
    )  # for qvalue loss
    _next_val_td = (
        tensordict_actor[1]
        .select(*self.qvalue_network.in_keys)
        .expand(self.num_qvalue_nets, *tensordict_actor[1].batch_size)
    )  # for next value estimation
    # one big Q evaluation: [actor-loss | next-value | q-loss] inputs ...
    tensordict_qval = torch.cat(
        [
            _actor_loss_td,
            _next_val_td,
            _qval_td,
        ],
        0,
    )
    # cat params: ... matched with [detached | target | trainable] Q params
    q_params_detach = self.qvalue_network_params.detach()
    qvalue_params = torch.cat(
        [
            q_params_detach,
            self.target_qvalue_network_params,
            self.qvalue_network_params,
        ],
        0,
    )
    tensordict_qval = vmap(self.qvalue_network)(
        tensordict_qval,
        qvalue_params,
    )
    state_action_value = tensordict_qval.get("state_action_value").squeeze(-1)
    (
        state_action_value_actor,
        next_state_action_value_qvalue,
        state_action_value_qvalue,
    ) = state_action_value.split(
        [self.num_qvalue_nets, self.num_qvalue_nets, self.num_qvalue_nets],
        dim=0,
    )
    sample_log_prob = tensordict_actor.get("sample_log_prob").squeeze(-1)
    (
        action_log_prob_actor,
        next_action_log_prob_qvalue,
    ) = sample_log_prob.unbind(0)
    # E[alpha * log_pi(a) - Q(s, a)] where a is reparameterized
    loss_actor = -(
        state_action_value_actor.min(0)[0] - self.alpha * action_log_prob_actor
    ).mean()
    # soft state value: min over Q targets minus entropy bonus
    next_state_value = (
        next_state_action_value_qvalue.min(0)[0]
        - self.alpha * next_action_log_prob_qvalue
    )
    target_value = get_next_state_value(
        tensordict,
        gamma=self.gamma,
        pred_next_val=next_state_value,
    )
    pred_val = state_action_value_qvalue
    td_error = (pred_val - target_value).pow(2)
    loss_qval = (
        distance_loss(
            pred_val,
            target_value.expand_as(pred_val),
            loss_function=self.loss_function,
        )
        .mean(-1)
        .sum()
        * 0.5
    )
    # write priorities for prioritized replay (max over the Q ensemble)
    tensordict.set("td_error", td_error.detach().max(0)[0])
    loss_alpha = self._loss_alpha(sample_log_prob)
    if not loss_qval.shape == loss_actor.shape:
        raise RuntimeError(
            f"QVal and actor loss have different shape: {loss_qval.shape} and {loss_actor.shape}"
        )
    td_out = TensorDict(
        {
            "loss_actor": loss_actor.mean(),
            "loss_qvalue": loss_qval.mean(),
            "loss_alpha": loss_alpha.mean(),
            "alpha": self.alpha.detach(),
            "entropy": -sample_log_prob.mean().detach(),
            "state_action_value_actor": state_action_value_actor.mean().detach(),
            "action_log_prob_actor": action_log_prob_actor.mean().detach(),
            "next.state_value": next_state_value.mean().detach(),
            "target_value": target_value.mean().detach(),
        },
        [],
    )
    return td_out
def _forward_explicit(self, tensordict: TensorDictBase) -> TensorDictBase:
    """Explicit (non-vectorized) SAC loss; used for debugging/verification."""
    # NOTE: evaluation order is kept — each term draws reparameterized samples.
    actor_objective, log_prob = self._loss_actor_explicit(tensordict.clone(False))
    qvalue_objective, td_error = self._loss_qval_explicit(tensordict.clone(False))
    # priorities for prioritized replay: max squared TD error over the ensemble
    tensordict.set("td_error", td_error.detach().max(0)[0])
    alpha_objective = self._loss_alpha(log_prob)
    out = {
        "loss_actor": actor_objective.mean(),
        "loss_qvalue": qvalue_objective.mean(),
        "loss_alpha": alpha_objective.mean(),
        "alpha": self.alpha.detach(),
        "entropy": -log_prob.mean().detach(),
    }
    return TensorDict(out, [])
def _rsample(
    self,
    dist,
):
    """Draw a reparameterized sample from `dist`."""
    # separated only for the purpose of making the sampling
    # deterministic to compare methods (see the __main__ harness below)
    return dist.rsample()
def _sample_reparam(self, tensordict, params):
    """Given a policy param batch and input data in a tensordict, writes a reparam sample and log-prob key."""
    with set_exploration_mode("random"):
        if self.gSDE:
            raise NotImplementedError
        # vmap doesn't support sampling, so we take it out from the vmap
        td_params = self.actor_network.get_dist_params(
            tensordict,
            params,
        )
        # The original duplicated the whole branch body; only the sample key
        # differs (last module of a sequential actor vs the actor itself).
        if isinstance(self.actor_network, TensorDictSequential):
            sample_key = self.actor_network[-1].out_keys[0]
        else:
            sample_key = self.actor_network.out_keys[0]
        tensordict_actor_dist = self.actor_network.build_dist_from_params(td_params)
        tensordict[sample_key] = self._rsample(tensordict_actor_dist)
        tensordict["sample_log_prob"] = tensordict_actor_dist.log_prob(
            tensordict[sample_key]
        )
    return tensordict
def _loss_actor_explicit(self, tensordict):
    """Actor loss E[alpha*log_pi(a) - min_i Q_i(s,a)] and the sample log-prob."""
    tensordict_actor = tensordict.clone(False)
    actor_params = self.actor_network_params
    tensordict_actor = self._sample_reparam(tensordict_actor, actor_params)
    action_log_prob_actor = tensordict_actor["sample_log_prob"]
    tensordict_qval = tensordict_actor.select(*self.qvalue_network.in_keys).expand(
        self.num_qvalue_nets, *tensordict_actor.batch_size
    )  # for actor loss
    # detach Q params: only the actor receives gradients from this term
    qvalue_params = self.qvalue_network_params.detach()
    tensordict_qval = vmap(self.qvalue_network)(
        tensordict_qval,
        qvalue_params,
    )
    state_action_value_actor = tensordict_qval.get("state_action_value").squeeze(-1)
    state_action_value_actor = state_action_value_actor.min(0)[0]
    # E[alpha * log_pi(a) - Q(s, a)] where a is reparameterized
    loss_actor = (
        self.alpha * action_log_prob_actor - state_action_value_actor
    ).mean()
    return loss_actor, action_log_prob_actor
def _loss_qval_explicit(self, tensordict):
    """Q loss against the soft Bellman target, plus per-sample TD error."""
    next_tensordict = step_mdp(tensordict)
    # next action sampled from the *target* actor
    next_tensordict = self._sample_reparam(
        next_tensordict, self.target_actor_network_params
    )
    next_action_log_prob_qvalue = next_tensordict["sample_log_prob"]
    next_state_action_value_qvalue = vmap(self.qvalue_network, (None, 0))(
        next_tensordict,
        self.target_qvalue_network_params,
    )["state_action_value"].squeeze(-1)
    # soft value: min over target Q ensemble minus entropy bonus
    next_state_value = (
        next_state_action_value_qvalue.min(0)[0]
        - self.alpha * next_action_log_prob_qvalue
    )
    pred_val = vmap(self.qvalue_network, (None, 0))(
        tensordict,
        self.qvalue_network_params,
    )["state_action_value"].squeeze(-1)
    target_value = get_next_state_value(
        tensordict,
        gamma=self.gamma,
        pred_next_val=next_state_value,
    )
    # 1/2 * E[Q(s,a) - (r + gamma * (Q(s,a)-alpha log pi(s, a)))
    loss_qval = (
        distance_loss(
            pred_val,
            target_value.expand_as(pred_val),
            loss_function=self.loss_function,
        )
        .mean(-1)
        .sum()
        * 0.5
    )
    td_error = (pred_val - target_value).pow(2)
    return loss_qval, td_error
if __name__ == "__main__":
    from tensordict.nn import TensorDictModule
    from torch import nn
    from torchrl.data import BoundedTensorSpec
    # Tests the vectorized version of SAC-v2 against plain implementation
    from torchrl.modules import ProbabilisticActor, ValueOperator
    from torchrl.modules.distributions import TanhNormal
    torch.manual_seed(0)
    action_spec = BoundedTensorSpec(-1, 1, shape=(3,))
    # tiny actor: one linear layer split into loc/scale heads
    class Splitter(nn.Linear):
        def forward(self, x):
            loc, scale = super().forward(x).chunk(2, -1)
            return loc, scale.exp()
    actor_module = TensorDictModule(
        Splitter(6, 6), in_keys=["obs"], out_keys=["loc", "scale"]
    )
    actor = ProbabilisticActor(
        spec=action_spec,
        in_keys=["loc", "scale"],
        module=actor_module,
        distribution_class=TanhNormal,
        default_interaction_mode="random",
        return_log_prob=False,
    )
    # tiny critic: linear layer over concatenated (obs, action)
    class QVal(nn.Linear):
        def forward(self, s: Tensor, a: Tensor) -> Tensor:
            return super().forward(torch.cat([s, a], -1))
    qvalue = ValueOperator(QVal(9, 1), in_keys=["obs", "action"])
    # monkey-patch _rsample so both code paths "sample" the same constant,
    # making the explicit/vectorized outputs directly comparable
    _rsample_old = SACLoss._rsample
    def _rsample_new(self, dist):
        return torch.ones_like(_rsample_old(self, dist))
    SACLoss._rsample = _rsample_new
    loss = SACLoss(actor, qvalue)
    for batch in ((), (2, 3)):
        td_input = TensorDict(
            {
                "obs": torch.rand(*batch, 6),
                "action": torch.rand(*batch, 3).clamp(-1, 1),
                "next": {"obs": torch.rand(*batch, 6)},
                "reward": torch.rand(*batch, 1),
                "done": torch.zeros(*batch, 1, dtype=torch.bool),
            },
            batch,
        )
        loss._explicit = True
        loss0 = loss(td_input)
        loss._explicit = False
        loss1 = loss(td_input)
        # differences should be ~0 if both implementations agree
        print("a", loss0["loss_actor"] - loss1["loss_actor"])
        print("q", loss0["loss_qvalue"] - loss1["loss_qvalue"])
|
agenthive-dev
|
examples/sac_loss.py
|
import json
import random
import torch
import numpy as np
class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars and arrays to native Python types."""

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Fall back to the base class, which raises TypeError for unknown types.
        return super().default(obj)
class bcolors:
    """ANSI escape sequences for colored terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # resets all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def control_seed(seed):
    """Seed python, numpy and torch RNGs and force deterministic cuDNN."""
    for seeder in (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed_all,
    ):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
def stack_tensor_list(tensor_list):
    """Stack a list of array-likes into a single numpy array."""
    stacked = np.array(tensor_list)
    return stacked
def stack_tensor_dict_list(tensor_dict_list):
    """
    Stack a list of dictionaries of {tensors or dictionary of tensors}.
    :param tensor_dict_list: a list of dictionaries of {tensors or dictionary of tensors}.
    :return: a dictionary of {stacked tensors or dictionary of stacked tensors}
    """
    first = tensor_dict_list[0]
    ret = {}
    for k, example in first.items():
        per_key = [d[k] for d in tensor_dict_list]
        if isinstance(example, dict):
            # nested dicts are stacked recursively
            ret[k] = stack_tensor_dict_list(per_key)
        else:
            ret[k] = stack_tensor_list(per_key)
    return ret
def tensorize(var, device='cpu'):
    """
    Convert input to torch.Tensor on desired device
    :param var: torch.Tensor, np.ndarray, or float-like scalar
    :param device: desired device for output (e.g. cpu, cuda)
    :return: torch.Tensor mapped to the device, or None for unsupported types
    """
    # isinstance (rather than type ==) also accepts subclasses such as
    # np.float64, which previously fell through to the error branch.
    if isinstance(var, torch.Tensor):
        return var.to(device)
    if isinstance(var, np.ndarray):
        return torch.from_numpy(var).float().to(device)
    if isinstance(var, float):
        # Bug fix: the scalar branch previously ignored `device`.
        return torch.tensor(var).float().to(device)
    # Preserve the original best-effort behavior for unknown types.
    print("Variable type not compatible with function.")
    return None
|
agenthive-dev
|
scripts/bc/misc.py
|
"""
Minimize bc loss (MLE, MSE, RWR etc.) with pytorch optimizers
"""
import logging
logging.disable(logging.CRITICAL)
import numpy as np
import torch
import time as timer
from tqdm import tqdm
from misc import tensorize
class BC:
    """Behavior-cloning trainer: fits `policy` to expert (observation, action) pairs.

    Supports an MLE (negative log-likelihood) loss or an MSE loss, optional
    input/output normalization computed from the expert data, and minibatch
    Adam optimization with optional scalar logging.
    """
    def __init__(self, expert_paths,
                 policy,
                 epochs = 5,
                 batch_size = 64,
                 lr = 1e-3,
                 optimizer = None,
                 loss_type = 'MSE',  # can be 'MLE' or 'MSE'
                 save_logs = True,
                 logger = None,
                 set_transforms = False,
                 *args, **kwargs,
                 ):
        self.policy = policy
        self.expert_paths = expert_paths
        self.epochs = epochs
        self.mb_size = batch_size
        self.logger = logger
        self.loss_type = loss_type
        self.save_logs = save_logs
        self.device = self.policy.device
        assert (self.loss_type == 'MSE' or self.loss_type == 'MLE')
        # logging requires a logger instance
        if self.save_logs: assert not self.logger is None
        if set_transforms:
            in_shift, in_scale, out_shift, out_scale = self.compute_transformations()
            self.set_transformations(in_shift, in_scale, out_shift, out_scale)
            #self.set_variance_with_data(out_scale)
        # construct optimizer
        self.optimizer = torch.optim.Adam(self.policy.trainable_params, lr=lr) if optimizer is None else optimizer
        # Loss criterion if required
        if loss_type == 'MSE':
            self.loss_criterion = torch.nn.MSELoss()

    def compute_transformations(self):
        """Return (in_shift, in_scale, out_shift, out_scale): mean/std of expert
        observations and actions, or all None when there is no expert data."""
        # get transformations
        if self.expert_paths == [] or self.expert_paths is None:
            in_shift, in_scale, out_shift, out_scale = None, None, None, None
        else:
            print(type(self.expert_paths))
            if type(self.expert_paths) is list:
                observations = np.concatenate([path["observations"] for path in self.expert_paths])
                actions = np.concatenate([path["actions"] for path in self.expert_paths])
            else: # 'h5py._hl.files.File'
                observations = np.concatenate([self.expert_paths[k]['observations'] for k in self.expert_paths.keys()])
                actions = np.concatenate([self.expert_paths[k]['actions'] for k in self.expert_paths.keys()])
            in_shift, in_scale = np.mean(observations, axis=0), np.std(observations, axis=0)
            out_shift, out_scale = np.mean(actions, axis=0), np.std(actions, axis=0)
        return in_shift, in_scale, out_shift, out_scale

    def set_transformations(self, in_shift=None, in_scale=None, out_shift=None, out_scale=None):
        """Install normalization constants in the target policy."""
        # set scalings in the target policy
        self.policy.set_transformations(in_shift, in_scale, out_shift, out_scale)

    def set_variance_with_data(self, out_scale):
        """Set the gaussian policy's log-std from the expert action scale."""
        # set the variance of gaussian policy based on out_scale
        out_scale = tensorize(out_scale, device=self.policy.device)
        data_log_std = torch.log(out_scale + 1e-3)
        self.policy.set_log_std(data_log_std)

    def loss(self, data, idx=None):
        """Dispatch to the configured loss ('MLE' or 'MSE'); None for bad types."""
        if self.loss_type == 'MLE':
            return self.mle_loss(data, idx)
        elif self.loss_type == 'MSE':
            return self.mse_loss(data, idx)
        else:
            print("Please use valid loss type")
            return None

    def mle_loss(self, data, idx):
        """Negative mean log-likelihood of expert actions under the policy."""
        # use indices if provided (e.g. for mini-batching)
        # otherwise, use all the data
        idx = range(data['observations'].shape[0]) if idx is None else idx
        if type(data['observations']) == torch.Tensor:
            idx = torch.LongTensor(idx)
        obs = data['observations'][idx]
        act = data['expert_actions'][idx]
        mu, LL = self.policy.mean_LL(obs, act)
        # minimize negative log likelihood
        return -torch.mean(LL)

    def mse_loss(self, data, idx=None):
        """Mean-squared error between policy outputs and expert actions."""
        idx = range(data['observations'].shape[0]) if idx is None else idx
        if type(data['observations']) is torch.Tensor:
            idx = torch.LongTensor(idx)
        obs = data['observations'][idx]
        act_expert = data['expert_actions'][idx]
        # NOTE(review): only the actions are tensorized here; observations are
        # presumably already tensors (see train/train_h5) — confirm for raw callers.
        act_expert = tensorize(act_expert, device=self.policy.device)
        act_pi = self.policy.forward(obs)
        return self.loss_criterion(act_pi, act_expert.detach())

    def fit(self, data, suppress_fit_tqdm=False, **kwargs):
        """Run `self.epochs` epochs of minibatch SGD on `data`.

        `data` must contain "observations" and "expert_actions"; logs the loss
        before/after training and the per-epoch running average when enabled.
        """
        # data is a dict
        # keys should have "observations" and "expert_actions"
        validate_keys = all([k in data.keys() for k in ["observations", "expert_actions"]])
        assert validate_keys is True
        ts = timer.time()
        num_samples = data["observations"].shape[0]
        # log stats before
        if self.save_logs:
            loss_val = self.loss(data, idx=range(num_samples)).to('cpu').data.numpy().ravel()[0]
            self.logger.log_scalar("train/loss_before", loss_val, step=0)
            print('BC loss before', loss_val)
        # train loop
        for ep in config_tqdm(range(self.epochs), suppress_fit_tqdm):
            avg_loss = 0.0
            step = 0
            for mb in range(int(num_samples / self.mb_size)):
                # minibatches are sampled with replacement
                rand_idx = np.random.choice(num_samples, size=self.mb_size)
                self.optimizer.zero_grad()
                loss = self.loss(data, idx=rand_idx)
                loss.backward()
                self.optimizer.step()
                # incremental running average of the minibatch loss
                avg_loss = (avg_loss*step + loss.item())/(step+1)
                step += 1
            if self.save_logs:
                self.logger.log_scalar("train/bc_loss", avg_loss, step=ep+1)
        # log stats after
        if self.save_logs:
            loss_val = self.loss(data, idx=range(num_samples)).to('cpu').data.numpy().ravel()[0]
            self.logger.log_scalar("train/loss_after", loss_val, step=self.epochs)
            print('BC val loss', loss_val)

    def train(self, **kwargs):
        """Build (and cache) the training dict from list-style expert paths, then fit."""
        if not hasattr(self, 'data'):
            observations = np.concatenate([path["observations"] for path in self.expert_paths])
            expert_actions = np.concatenate([path["actions"] for path in self.expert_paths])
            observations = tensorize(observations, device=self.policy.device)
            expert_actions = tensorize(expert_actions, self.policy.device)
            self.data = dict(observations=observations, expert_actions=expert_actions)
        self.fit(self.data, **kwargs)

    def train_h5(self, **kwargs):
        """Build (and cache) the training dict from h5py-style expert paths, then fit."""
        if not hasattr(self, 'data'):
            observations = np.concatenate([self.expert_paths[k]['observations'] for k in self.expert_paths.keys()])
            expert_actions = np.concatenate([self.expert_paths[k]['actions'] for k in self.expert_paths.keys()])
            observations = tensorize(observations, device=self.policy.device)
            expert_actions = tensorize(expert_actions, self.policy.device)
            self.data = dict(observations=observations, expert_actions=expert_actions)
        self.fit(self.data, **kwargs)
def config_tqdm(range_inp, suppress_tqdm=False):
    """Return `range_inp` as-is, or wrapped in a tqdm progress bar."""
    return range_inp if suppress_tqdm else tqdm(range_inp)
|
agenthive-dev
|
scripts/bc/behavior_cloning.py
|
"""
Job script to learn policy using BC
"""
import os
import time
from os import environ
environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
environ['MKL_THREADING_LAYER']='GNU'
import pickle
import yaml
import hydra
import gym
import wandb
import numpy as np
from omegaconf import DictConfig, OmegaConf, ListConfig
from batch_norm_mlp import BatchNormMLP
from gmm_policy import GMMPolicy
from behavior_cloning import BC
from misc import control_seed, \
bcolors, stack_tensor_dict_list
from torchrl.record.loggers.wandb import WandbLogger
from robohive.logger.grouped_datasets import Trace as Trace
def evaluate_policy(
    policy,
    env,
    num_episodes,
    epoch,
    horizon=None,
    gamma=1,
    percentile=[],
    get_full_dist=False,
    eval_logger=None,
    device='cpu',
    seed=123,
    verbose=True,
):
    """Roll out `policy` in `env` for `num_episodes` and report return statistics.

    Returns ([base_stats, percentile_stats, full_dist], success) where
    base_stats = [mean, std, min, max] of the (gamma-discounted) episode
    returns and `success` comes from env.evaluate_success on the rollouts.
    NOTE(review): `percentile=[]` is a mutable default; it is only read here,
    but callers should not mutate it.
    """
    env.seed(seed)
    horizon = env.horizon if horizon is None else horizon
    mean_eval, std, min_eval, max_eval = 0.0, 0.0, -1e8, -1e8
    ep_returns = np.zeros(num_episodes)
    policy.eval()
    paths = []
    for ep in range(num_episodes):
        observations=[]
        actions=[]
        rewards=[]
        agent_infos = []
        env_infos = []
        o = env.reset()
        t, done = 0, False
        while t < horizon and (done == False):
            # get_action returns [sampled_action, info]; use the deterministic
            # 'evaluation' action for evaluation rollouts
            a = policy.get_action(o)[1]['evaluation']
            next_o, r, done, env_info = env.step(a)
            ep_returns[ep] += (gamma ** t) * r
            observations.append(o)
            actions.append(a)
            rewards.append(r)
            agent_infos.append(None)
            env_infos.append(env_info)
            o = next_o
            t += 1
        if verbose:
            print("Episode: {}; Reward: {}".format(ep, ep_returns[ep]))
        path = dict(
            observations=np.array(observations),
            actions=np.array(actions),
            rewards=np.array(rewards),
            #agent_infos=stack_tensor_dict_list(agent_infos),
            env_infos=stack_tensor_dict_list(env_infos),
            terminated=done
        )
        paths.append(path)
    mean_eval, std = np.mean(ep_returns), np.std(ep_returns)
    min_eval, max_eval = np.amin(ep_returns), np.amax(ep_returns)
    base_stats = [mean_eval, std, min_eval, max_eval]
    percentile_stats = []
    for p in percentile:
        percentile_stats.append(np.percentile(ep_returns, p))
    full_dist = ep_returns if get_full_dist is True else None
    success = env.evaluate_success(paths, logger=None) ## Don't use the mj_envs logging function
    if not eval_logger is None:
        # assumes env_infos carry 'rwd_sparse' / 'rwd_dense' keys — TODO confirm
        rwd_sparse = np.mean([np.mean(p['env_infos']['rwd_sparse']) for p in paths]) # return rwd/step
        rwd_dense = np.mean([np.sum(p['env_infos']['rwd_dense'])/env.horizon for p in paths]) # return rwd/step
        eval_logger.log_scalar('eval/rwd_sparse', rwd_sparse, step=epoch)
        eval_logger.log_scalar('eval/rwd_dense', rwd_dense, step=epoch)
        eval_logger.log_scalar('eval/success', success, step=epoch)
    return [base_stats, percentile_stats, full_dist], success
class ObservationWrapper:
    """Gym-env wrapper that replaces raw observations with encoder features.

    Depending on `encoder`, observations are proprioception only, or the
    env's visual feature vectors (one per entry in `visual_keys`)
    concatenated with proprioception.
    """
    def __init__(self, env_name, visual_keys, encoder):
        self.env = gym.make(env_name, visual_keys=visual_keys)
        self.horizon = self.env.horizon
        self.encoder = encoder
    def reset(self, **kwargs):
        """Reset the wrapped env and return the processed observation."""
        obs = self.env.reset(**kwargs)
        return self.get_obs(obs)
    def step(self, action):
        """Step the wrapped env; returns (processed_obs, reward, terminated, info)."""
        observation, reward, terminated, info = self.env.step(action)
        return self.get_obs(observation), reward, terminated, info
    def get_obs(self, observation=None):
        """Build the observation vector for the configured encoder."""
        if self.encoder == 'proprio':
            proprio_vec = self.env.get_proprioception()[1]
            return proprio_vec
        if len(self.env.visual_keys) > 0:
            # concatenate every visual feature vector, then append proprioception
            visual_obs = self.env.get_exteroception()
            final_visual_obs = None
            for key in self.env.visual_keys:
                if final_visual_obs is None:
                    final_visual_obs = visual_obs[key]
                else:
                    final_visual_obs = np.concatenate((final_visual_obs, visual_obs[key]), axis=-1)
            _, proprio_vec, _ = self.env.get_proprioception()
            observation = np.concatenate((final_visual_obs, proprio_vec))
        else:
            # no visual keys: fall back to the env's raw observation
            observation = self.env.get_obs() if observation is None else observation
        return observation
    def seed(self, seed):
        return self.env.seed(seed)
    def set_env_state(self, state_dict):
        return self.env.set_env_state(state_dict)
    def evaluate_success(self, paths, logger=None):
        return self.env.evaluate_success(paths, logger=logger)
def make_env(env_name, cam_name, encoder, from_pixels):
    """Create the env; when `from_pixels`, wrap it to emit encoder features."""
    if not from_pixels:
        return gym.make(env_name)
    assert encoder in ["vc1s", "vc1l", "r3m18", "rrl18", "rrl50", "r3m50", "2d", "1d", "proprio"]
    if encoder == "1d" or encoder == "2d":
        visual_keys = [f'rgb:{cam_name}:84x84:{encoder}']
    elif encoder == 'proprio':
        visual_keys = []
    else:
        # cam_name may be a single camera or a ListConfig of cameras
        if type(cam_name) == ListConfig:
            cams = list(cam_name)
        else:
            cams = [cam_name]
        visual_keys = [f'rgb:{cam}:224x224:{encoder}' for cam in cams]
    print(f"Using visual keys {visual_keys}")
    return ObservationWrapper(env_name, visual_keys=visual_keys, encoder=encoder)
@hydra.main(config_name="bc.yaml", config_path="config")
def main(job_data: DictConfig):
    """Replay logged trajectories to rebuild observations, train BC, evaluate.

    Pipeline: load the trajectory trace, re-execute each action sequence in
    the (possibly feature-encoded) env to regenerate observations, train a
    GMM policy with behavior cloning, and periodically evaluate success.
    """
    OmegaConf.resolve(job_data)
    job_data['policy_size'] = tuple(job_data['policy_size'])
    exp_start = time.time()
    OUT_DIR = os.getcwd()
    if not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR)
    if not os.path.exists(OUT_DIR+'/iterations'): os.mkdir(OUT_DIR+'/iterations')
    if not os.path.exists(OUT_DIR+'/logs'): os.mkdir(OUT_DIR+'/logs')
    if job_data['from_pixels'] == False:
        job_data['env_name'] = job_data['env_name'].replace('_v2d', '')
    #exp_name = OUT_DIR.split('/')[-1] ## TODO: Customizer for logging
    # Unpack args and make files for easy access
    #logger = DataLog()
    exp_name = job_data['env_name'] + '_pixels' + str(job_data['from_pixels']) + '_' + job_data['encoder']
    logger = WandbLogger(
        exp_name=exp_name,
        config=job_data,
        name=exp_name,
        project=job_data['wandb_project'],
        entity=job_data['wandb_entity'],
        mode=job_data['wandb_mode'],
    )
    ENV_NAME = job_data['env_name']
    EXP_FILE = OUT_DIR + '/job_data.yaml'
    SEED = job_data['seed']
    # base cases
    if 'device' not in job_data.keys(): job_data['device'] = 'cpu'
    assert 'data_file' in job_data.keys()
    yaml_config = OmegaConf.to_yaml(job_data)
    with open(EXP_FILE, 'w') as file: yaml.dump(yaml_config, file)
    env = make_env(
        env_name=job_data["env_name"],
        cam_name=job_data["cam_name"],
        encoder=job_data["encoder"],
        from_pixels=job_data["from_pixels"]
    )
    # ===============================================================================
    # Setup functions and environment
    # ===============================================================================
    control_seed(SEED)
    env.seed(SEED)
    paths_trace = Trace.load(job_data['data_file'])
    bc_paths = []
    for key, path in paths_trace.items():
        path_dict = {}
        traj_len = path['observations'].shape[0]
        obs_list = []
        ep_reward = 0.0
        env.reset()
        init_state_dict = {}
        t0 = time.time()
        # Bug fix: use a distinct loop variable; reusing `key` here shadowed
        # the trajectory key, so the summary print below reported the wrong key.
        for state_key, value in path['env_infos']['state'].items():
            init_state_dict[state_key] = value[0]
        env.set_env_state(init_state_dict)
        obs = env.get_obs()
        # replay the logged actions to regenerate (encoded) observations
        for step in range(traj_len-1):
            next_obs, reward, done, env_info = env.step(path["actions"][step])
            ep_reward += reward
            obs_list.append(obs)
            obs = next_obs
        t1 = time.time()
        obs_np = np.stack(obs_list, axis=0)
        path_dict['observations'] = obs_np # [:-1]
        path_dict['actions'] = path['actions'][()][:-1]
        path_dict['env_infos'] = {'solved': path['env_infos']['solved'][()]}
        print(f"Time to convert one trajectory: {(t1-t0)/60:4.2f}")
        print("Converted episode reward:", ep_reward)
        print("Original episode reward:", np.sum(path["rewards"]))
        print(key, path_dict['observations'].shape, path_dict['actions'].shape)
        bc_paths.append(path_dict)
    expert_success = env.evaluate_success(bc_paths)
    print(f"{bcolors.BOLD}{bcolors.OKGREEN}{exp_name} {bcolors.ENDC}")
    print(f"{bcolors.BOLD}{bcolors.OKGREEN}Expert Success Rate: {expert_success}. {bcolors.ENDC}")
    observation_dim = bc_paths[0]['observations'].shape[-1]
    action_dim = bc_paths[0]['actions'].shape[-1]
    print(f'Policy obs dim {observation_dim} act dim {action_dim}')
    policy = GMMPolicy(
        # network_kwargs
        input_size=observation_dim,
        output_size=action_dim,
        hidden_size=job_data['policy_size'][0],
        num_layers=len(job_data['policy_size']),
        min_std=0.0001,
        num_modes=5,
        activation="softplus",
        low_eval_noise=False,
        # loss_kwargs
    )
    set_transforms = False
    # ===============================================================================
    # Model training
    # ===============================================================================
    print(f"{bcolors.OKBLUE}Training BC{bcolors.ENDC}")
    policy.to(job_data['device'])
    bc_agent = BC(
        bc_paths,
        policy,
        epochs=job_data['eval_every_n'],
        batch_size=job_data['bc_batch_size'],
        lr=job_data['bc_lr'],
        loss_type='MLE',
        save_logs=True,
        logger=logger,
        set_transforms=set_transforms,
    )
    # train in chunks of eval_every_n epochs, evaluating after each chunk
    for ind in range(0, job_data['bc_epochs'], job_data['eval_every_n']):
        policy.train()
        bc_agent.train()
        # bc_agent.train_h5()
        policy.eval()
        _, success_rate = evaluate_policy(
            env=env,
            policy=policy,
            eval_logger=logger,
            epoch=ind+job_data['eval_every_n'],
            num_episodes=job_data['eval_traj'],
            seed=job_data['seed'] + 123,
            verbose=True,
            device='cpu',
        )
        policy.to(job_data['device'])
        exp_end = time.time()
        print(f"{bcolors.BOLD}{bcolors.OKGREEN}Success Rate: {success_rate}. Time: {(exp_end - exp_start)/60:4.2f} minutes.{bcolors.ENDC}")
    exp_end = time.time()
    print(f"{bcolors.BOLD}{bcolors.OKGREEN}Success Rate: {success_rate}. Time: {(exp_end - exp_start)/60:4.2f} minutes.{bcolors.ENDC}")
    # pickle.dump(bc_agent, open(OUT_DIR + '/iterations/agent_final.pickle', 'wb'))
    pickle.dump(policy, open(OUT_DIR + '/iterations/policy_final.pickle', 'wb'))
    wandb.finish()
|
agenthive-dev
|
scripts/bc/run_bc_h5.py
|
import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
class FCNetworkWithBatchNorm(nn.Module):
    """Fully-connected network with input batch-norm and per-layer dropout."""

    def __init__(self, obs_dim, act_dim,
                 hidden_sizes=(64,64),
                 nonlinearity='relu',  # either 'tanh' or 'relu'
                 dropout=0,            # probability to dropout activations (0 means no dropout)
                 *args, **kwargs,
                 ):
        super(FCNetworkWithBatchNorm, self).__init__()
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        assert type(hidden_sizes) == tuple
        self.layer_sizes = (obs_dim, ) + hidden_sizes + (act_dim, )
        self.device = 'cpu'
        # hidden layers; module creation order kept stable so seeded
        # parameter initialization is reproducible
        self.fc_layers = nn.ModuleList(
            [nn.Linear(dim_in, dim_out)
             for dim_in, dim_out in zip(self.layer_sizes[:-1], self.layer_sizes[1:])]
        )
        self.nonlinearity = torch.relu if nonlinearity == 'relu' else torch.tanh
        self.input_batchnorm = nn.BatchNorm1d(num_features=obs_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Normalize the input, then apply linear -> dropout -> nonlinearity
        for every hidden layer and a bare linear output layer."""
        out = self.input_batchnorm(x.to(self.device))
        *hidden_layers, output_layer = list(self.fc_layers)
        for layer in hidden_layers:
            out = self.nonlinearity(self.dropout(layer(out)))
        return output_layer(out)

    def to(self, device):
        """Remember the target device, then move parameters as usual."""
        self.device = device
        return super().to(device)

    def set_transformations(self, *args, **kwargs):
        # this network performs no input/output rescaling
        pass
class BatchNormMLP(nn.Module):
    """Gaussian MLP policy: batch-norm MLP mean network plus a learned,
    state-independent log-std vector."""
    def __init__(self, env_spec=None,
                 action_dim=None,
                 observation_dim=None,
                 hidden_sizes=(64,64),
                 min_log_std=-3,
                 init_log_std=0,
                 seed=None,
                 nonlinearity='relu',
                 dropout=0,
                 device='cpu',
                 *args, **kwargs,
                 ):
        """
        :param env_spec: specifications of the env (see utils/gym_env.py)
        :param hidden_sizes: network hidden layer sizes (currently 2 layers only)
        :param min_log_std: log_std is clamped at this value and can't go below
        :param init_log_std: initial log standard deviation
        :param seed: random seed
        """
        super(BatchNormMLP, self).__init__()
        self.device = device
        self.n = env_spec.observation_dim if observation_dim is None else observation_dim  # number of states
        self.m = env_spec.action_dim if action_dim is None else action_dim  # number of actions
        self.min_log_std = min_log_std
        # Set seed
        # ------------------------
        if seed is not None:
            torch.manual_seed(seed)
            np.random.seed(seed)
        # Policy network
        # ------------------------
        self.model = FCNetworkWithBatchNorm(self.n, self.m, hidden_sizes, nonlinearity, dropout)
        # make weights small
        for param in list(self.model.parameters())[-2:]: # only last layer
            param.data = 1e-2 * param.data
        self.log_std = Variable(torch.ones(self.m) * init_log_std, requires_grad=True)
        self.trainable_params = list(self.model.parameters()) + [self.log_std]
        self.model.eval()
        # Easy access variables
        # -------------------------
        self.log_std_val = np.float64(self.log_std.data.numpy().ravel())
        self.param_shapes = [p.data.numpy().shape for p in self.trainable_params]
        self.param_sizes = [p.data.numpy().size for p in self.trainable_params]
        self.d = np.sum(self.param_sizes)  # total number of params
        # Placeholders
        # ------------------------
        self.obs_var = Variable(torch.randn(self.n), requires_grad=False)
    # Utility functions
    # ============================================
    def to(self, device):
        """Move the mean network to `device` and remember the device."""
        super().to(device)
        self.model = self.model.to(device)
        print(self.model)
        self.device = device
        return self
    # Main functions
    # ============================================
    def get_action(self, observation):
        """Sample a noisy action for one observation.

        Returns [action, info] where info['evaluation'] is the deterministic
        mean action. NOTE(review): self.log_std_val is fixed at construction
        time here; it is not refreshed when log_std is trained — confirm.
        """
        o = np.float32(observation.reshape(1, -1))
        self.obs_var.data = torch.from_numpy(o)
        mean = self.model(self.obs_var).to('cpu').data.numpy().ravel()
        noise = np.exp(self.log_std_val) * np.random.randn(self.m)
        action = mean + noise
        return [action, {'mean': mean, 'log_std': self.log_std_val, 'evaluation': mean}]
    # ============================================
    def forward(self, observations):
        """Return mean actions for a batch of observations (ndarray or Tensor)."""
        if type(observations) == np.ndarray: observations = torch.from_numpy(observations).float()
        assert type(observations) == torch.Tensor
        observations = observations.to(self.device)
        out = self.model(observations)
        return out
|
agenthive-dev
|
scripts/bc/batch_norm_mlp.py
|
import torch
import numpy as np
import torch.nn as nn
import torch.distributions as D
import torch.nn.functional as F
class GMMPolicy(nn.Module):
    """Gaussian-mixture policy: an MLP trunk predicting per-mode means, stds
    and mixture logits, exposed as a torch MixtureSameFamily distribution."""
    def __init__(self,
                 # network_kwargs
                 input_size,
                 output_size,
                 hidden_size=1024,
                 num_layers=2,
                 min_std=0.0001,
                 num_modes=5,
                 activation="softplus",
                 low_eval_noise=False,
                 # loss_kwargs
                 loss_coef=1.0):
        super().__init__()
        self.num_modes = num_modes
        self.output_size = output_size
        self.min_std = min_std
        if num_layers > 0:
            # trunk: input batch-norm followed by num_layers Linear+ReLU blocks
            sizes = [input_size] + [hidden_size] * num_layers
            layers = [nn.BatchNorm1d(num_features=input_size)]
            for i in range(num_layers):
                layers += [nn.Linear(sizes[i], sizes[i+1]), nn.ReLU()]
            layers += [nn.Linear(sizes[-2], sizes[-1])]
            self.share = nn.Sequential(*layers)
        else:
            self.share = nn.Identity()
        # per-mode heads for means, log-stds, and mixture logits
        self.mean_layer = nn.Linear(hidden_size, output_size * num_modes)
        self.logstd_layer = nn.Linear(hidden_size, output_size * num_modes)
        self.logits_layer = nn.Linear(hidden_size, num_modes)
        self.low_eval_noise = low_eval_noise
        self.loss_coef = loss_coef
        if activation == "softplus":
            self.actv = F.softplus
        else:
            self.actv = torch.exp
        self.trainable_params = list(self.share.parameters()) + \
            list(self.mean_layer.parameters()) + \
            list(self.logstd_layer.parameters()) + \
            list(self.logits_layer.parameters())
    def to(self, device):
        """Move parameters to `device` and remember it for get_action."""
        super().to(device)
        self.device = device
        return self
    def forward_fn(self, x):
        # x: (B, input_size)
        share = self.share(x)
        # means are squashed to (-1, 1) with tanh
        means = self.mean_layer(share).view(-1, self.num_modes, self.output_size)
        means = torch.tanh(means)
        logits = self.logits_layer(share)
        if self.training or not self.low_eval_noise:
            logstds = self.logstd_layer(share).view(-1, self.num_modes, self.output_size)
            stds = self.actv(logstds) + self.min_std
        else:
            # low_eval_noise in eval mode: near-deterministic components
            stds = torch.ones_like(means) * 1e-4
        return means, stds, logits
    def get_action(self, observation):
        """Sample an action for one observation.

        Returns [action, info]; info['evaluation'] is the GMM mean as a flat
        numpy array. NOTE(review): `action` is returned as a torch tensor,
        unlike the numpy mean — confirm callers expect that.
        """
        o = np.float32(observation.reshape(1, -1))
        o = torch.from_numpy(o).to(self.device)
        means, stds, logits = self.forward_fn(o)
        compo = D.Normal(loc=means, scale=stds)
        compo = D.Independent(compo, 1)
        mix = D.Categorical(logits=logits)
        gmm = D.MixtureSameFamily(mixture_distribution=mix,
                                  component_distribution=compo)
        action = gmm.sample()
        mean = gmm.mean
        mean = mean.detach().cpu().numpy().ravel()
        return [action, {'mean': mean, 'std': stds, 'evaluation': mean}]
    def forward(self, x):
        """Return the mixture distribution over actions for a batch of inputs."""
        means, scales, logits = self.forward_fn(x)
        compo = D.Normal(loc=means, scale=scales)
        compo = D.Independent(compo, 1)
        mix = D.Categorical(logits=logits)
        gmm = D.MixtureSameFamily(mixture_distribution=mix,
                                  component_distribution=compo)
        return gmm
    def mean_LL(self, x, target):
        """Return (mean, log_prob) of `target` under the predicted GMM."""
        gmm_dist = self.forward(x)
        # return mean, log_prob of the gmm
        return gmm_dist.mean, gmm_dist.log_prob(target)
    def loss_fn(self, gmm, target, reduction='mean'):
        """Negative log-likelihood loss, scaled by `loss_coef`."""
        log_probs = gmm.log_prob(target)
        loss = -log_probs
        if reduction == 'mean':
            return loss.mean() * self.loss_coef
        elif reduction == 'none':
            return loss * self.loss_coef
        elif reduction == 'sum':
            return loss.sum() * self.loss_coef
        else:
            raise NotImplementedError
|
agenthive-dev
|
scripts/bc/gmm_policy.py
|
import torch
from rlhive.rl_envs import RoboHiveEnv
from rlhive.sim_algos.helpers.rrl_transform import RRLTransform
from torchrl.envs import (
CatTensors,
DoubleToFloat,
ObservationNorm,
R3MTransform,
SelectTransform,
TransformedEnv,
)
from torchrl.envs.transforms import Compose, FlattenObservation, RewardScaling
from torchrl.envs.utils import set_exploration_mode
def make_env(task, visual_transform, reward_scaling, device):
    """Create a RoboHive env and wrap it with visual/reward transforms."""
    assert visual_transform in ("rrl", "r3m")
    base_env = RoboHiveEnv(task, device=device)
    transformed = make_transformed_env(
        env=base_env,
        reward_scaling=reward_scaling,
        visual_transform=visual_transform,
    )
    print(transformed)
    # exit()
    return transformed
def make_transformed_env(
    env,
    reward_scaling=5.0,
    visual_transform="r3m",
    stats=None,
):
    """
    Apply transforms to the env (such as reward scaling and state normalization)
    """
    # keep only the keys needed downstream
    env = TransformedEnv(env, SelectTransform("solved", "pixels", "observation"))
    if visual_transform == "rrl":
        vec_keys = ["rrl_vec"]
        selected_keys = ["observation", "rrl_vec"]
        env.append_transform(
            Compose(
                RRLTransform("resnet50", in_keys=["pixels"], download=True),
                FlattenObservation(-2, -1, in_keys=vec_keys),
            )
        ) # Necessary to Compose R3MTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
    elif visual_transform == "r3m":
        vec_keys = ["r3m_vec"]
        selected_keys = ["observation", "r3m_vec"]
        env.append_transform(
            Compose(
                R3MTransform("resnet50", in_keys=["pixels"], download=True),
                FlattenObservation(-2, -1, in_keys=vec_keys),
            )
        ) # Necessary to Compose R3MTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
    else:
        raise NotImplementedError
    env.append_transform(RewardScaling(loc=0.0, scale=reward_scaling))
    # concatenate proprioceptive + visual features into a single vector
    out_key = "observation_vector"
    env.append_transform(CatTensors(in_keys=selected_keys, out_key=out_key))
    # we normalize the states
    if stats is None:
        # identity normalization when no stats are provided
        _stats = {"loc": 0.0, "scale": 1.0}
    else:
        _stats = stats
    env.append_transform(
        ObservationNorm(**_stats, in_keys=[out_key], standard_normal=True)
    )
    env.append_transform(DoubleToFloat(in_keys=[out_key], in_keys_inv=[]))
    return env
# Smoke test: build the transformed env and run one random step on GPU.
env = make_env(
    task="visual_franka_slide_random-v3",
    reward_scaling=5.0,
    device=torch.device("cuda:0"),
    visual_transform="rrl",
)
# "random" exploration mode: actions are sampled rather than taken as the mean.
with torch.no_grad(), set_exploration_mode("random"):
    td = env.reset()
    td = env.rand_step()
print(td)
|
agenthive-dev
|
scripts/sac_mujoco/test.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gc
import os
import hydra
import numpy as np
import torch
import torch.cuda
import tqdm
import wandb
from omegaconf import DictConfig
from rlhive.rl_envs import RoboHiveEnv
from rlhive.sim_algos.helpers.rrl_transform import RRLTransform
# from torchrl.objectives import SACLoss
from sac_loss import SACLoss
from torch import nn, optim
from torchrl.collectors import MultiaSyncDataCollector
from torchrl.data import TensorDictPrioritizedReplayBuffer, TensorDictReplayBuffer
from torchrl.data.replay_buffers.storages import LazyMemmapStorage
from torchrl.envs import (
CatTensors,
DoubleToFloat,
ObservationNorm,
R3MTransform,
SelectTransform,
TransformedEnv,
)
from torchrl.envs.transforms import Compose, FlattenObservation, RewardScaling
from torchrl.envs.utils import set_exploration_mode
from torchrl.modules import MLP, NormalParamWrapper, SafeModule
from torchrl.modules.distributions import TanhNormal
from torchrl.modules.tensordict_module.actors import ProbabilisticActor, ValueOperator
from torchrl.objectives import SoftUpdate
from torchrl.trainers import Recorder
os.environ["WANDB_MODE"] = "offline" # offline sync. TODO: Remove this behavior
def make_env(task, visual_transform, reward_scaling, device, from_pixels):
    """Construct a RoboHive env wrapped with reward/observation transforms."""
    assert visual_transform in ("rrl", "r3m")
    base_env = RoboHiveEnv(task, device=device)
    transformed = make_transformed_env(
        env=base_env,
        reward_scaling=reward_scaling,
        visual_transform=visual_transform,
        from_pixels=from_pixels,
    )
    print(transformed)
    return transformed
def make_transformed_env(
    env,
    from_pixels,
    reward_scaling=5.0,
    visual_transform="r3m",
    stats=None,
):
    """
    Apply transforms to the env (such as reward scaling and state normalization)
    """
    if from_pixels:
        # keep pixels so a visual encoder can consume them
        env = TransformedEnv(env, SelectTransform("solved", "pixels", "observation"))
        if visual_transform == "rrl":
            vec_keys = ["rrl_vec"]
            selected_keys = ["observation", "rrl_vec"]
            env.append_transform(
                Compose(
                    RRLTransform("resnet50", in_keys=["pixels"], download=True),
                    FlattenObservation(-2, -1, in_keys=vec_keys),
                )
            ) # Necessary to Compose R3MTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
        elif visual_transform == "r3m":
            vec_keys = ["r3m_vec"]
            selected_keys = ["observation", "r3m_vec"]
            env.append_transform(
                Compose(
                    R3MTransform("resnet50", in_keys=["pixels"], download=True),
                    FlattenObservation(-2, -1, in_keys=vec_keys),
                )
            ) # Necessary to Compose R3MTransform with FlattenObservation; Track bug: https://github.com/pytorch/rl/issues/802
        else:
            raise NotImplementedError
    else:
        # state-only observations
        env = TransformedEnv(env, SelectTransform("solved", "observation"))
        selected_keys = ["observation"]
    env.append_transform(RewardScaling(loc=0.0, scale=reward_scaling))
    # concatenate the selected keys into a single flat observation vector
    out_key = "observation_vector"
    env.append_transform(CatTensors(in_keys=selected_keys, out_key=out_key))
    # we normalize the states
    if stats is None:
        # identity normalization when no stats are provided
        _stats = {"loc": 0.0, "scale": 1.0}
    else:
        _stats = stats
    env.append_transform(
        ObservationNorm(**_stats, in_keys=[out_key], standard_normal=True)
    )
    env.append_transform(DoubleToFloat(in_keys=[out_key], in_keys_inv=[]))
    return env
def make_recorder(
    task: str,
    frame_skip: int,
    record_interval: int,
    actor_model_explore: object,
    eval_traj: int,
    env_configs: dict,
):
    """Build a Recorder that evaluates the exploration policy on a fresh env.

    Records `eval_traj` full-horizon trajectories every `record_interval`
    intervals, logging reward and the env's 'solved' flag.
    """
    test_env = make_env(task=task, **env_configs)
    recorder_obj = Recorder(
        record_frames=eval_traj * test_env.horizon,
        frame_skip=frame_skip,
        policy_exploration=actor_model_explore,
        recorder=test_env,
        exploration_mode="mean",  # deterministic (mean) actions for evaluation
        record_interval=record_interval,
        log_keys=["reward", "solved"],
        out_keys={"reward": "r_evaluation", "solved": "success"},
    )
    return recorder_obj
def make_replay_buffer(
    prb: bool,
    buffer_size: int,
    buffer_scratch_dir: str,
    device: torch.device,
    make_replay_buffer: int = 3,
):
    """Build a memmap-backed replay buffer, prioritized when `prb` is True.

    NOTE(review): the `make_replay_buffer` parameter is the prefetch depth;
    the name shadows this function but is kept for interface compatibility.
    """
    storage = LazyMemmapStorage(
        buffer_size,
        scratch_dir=buffer_scratch_dir,
        device=device,
    )
    common_kwargs = dict(
        pin_memory=False,
        prefetch=make_replay_buffer,
        storage=storage,
    )
    if prb:
        return TensorDictPrioritizedReplayBuffer(alpha=0.7, beta=0.5, **common_kwargs)
    return TensorDictReplayBuffer(**common_kwargs)
def evaluate_success(env_success_fn, td_record: dict, eval_traj: int):
    """Compute the success percentage from recorded rollouts.

    Reshapes the flat `success` signal into one row per evaluation trajectory
    and hands per-trajectory traces to the env's success-evaluation function.
    """
    td_record["success"] = td_record["success"].reshape((eval_traj, -1))
    paths = [
        {"env_infos": {"solved": solved_traj.data.cpu().numpy()}}
        for solved_traj in td_record["success"]
    ]
    return env_success_fn(paths)
@hydra.main(config_name="sac.yaml", config_path="config")
def main(args: DictConfig):
    """Train SAC on a RoboHive task.

    Builds the environment, actor/critic networks, SAC loss, async data
    collector, replay buffer and evaluation recorder from the hydra config,
    then runs the collect -> optimize -> evaluate loop, logging to wandb.
    """
    # Use the GPU only when available AND explicitly requested by the config.
    device = (
        torch.device("cuda:0")
        if torch.cuda.is_available()
        and torch.cuda.device_count() > 0
        and args.device == "cuda:0"
        else torch.device("cpu")
    )
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    # Create Environment
    env_configs = {
        "reward_scaling": args.reward_scaling,
        "visual_transform": args.visual_transform,
        "device": args.device,
        "from_pixels": args.from_pixels,
    }
    train_env = make_env(task=args.task, **env_configs)
    # Create Agent
    # Define Actor Network
    in_keys = ["observation_vector"]
    action_spec = train_env.action_spec
    actor_net_kwargs = {
        "num_cells": [256, 256],
        # loc and scale for each action dimension -> 2 * action_dim outputs
        "out_features": 2 * action_spec.shape[-1],
        "activation_class": nn.ReLU,
    }
    actor_net = MLP(**actor_net_kwargs)
    dist_class = TanhNormal
    dist_kwargs = {
        "min": action_spec.space.minimum,
        "max": action_spec.space.maximum,
        "tanh_loc": False,
    }
    # Wrap the MLP so its output is split into (loc, scale) with a
    # positivity-enforcing mapping and a lower bound on scale.
    actor_net = NormalParamWrapper(
        actor_net,
        scale_mapping=f"biased_softplus_{1.0}",
        scale_lb=0.1,
    )
    in_keys_actor = in_keys
    actor_module = SafeModule(
        actor_net,
        in_keys=in_keys_actor,
        out_keys=[
            "loc",
            "scale",
        ],
    )
    actor = ProbabilisticActor(
        spec=action_spec,
        in_keys=["loc", "scale"],
        module=actor_module,
        distribution_class=dist_class,
        distribution_kwargs=dist_kwargs,
        default_interaction_mode="random",
        return_log_prob=False,
    )
    # Define Critic Network: Q(s, a) -> scalar
    qvalue_net_kwargs = {
        "num_cells": [256, 256],
        "out_features": 1,
        "activation_class": nn.ReLU,
    }
    qvalue_net = MLP(
        **qvalue_net_kwargs,
    )
    qvalue = ValueOperator(
        in_keys=["action"] + in_keys,
        module=qvalue_net,
    )
    model = nn.ModuleList([actor, qvalue]).to(device)
    # add forward pass for initialization with proof env
    proof_env = make_env(task=args.task, **env_configs)
    # init nets (lazy layers materialize their shapes on first call)
    with torch.no_grad(), set_exploration_mode("random"):
        td = proof_env.reset()
        td = td.to(device)
        for net in model:
            net(td)
    del td
    proof_env.close()
    actor_model_explore = model[0]
    # Create SAC loss
    loss_module = SACLoss(
        actor_network=model[0],
        qvalue_network=model[1],
        num_qvalue_nets=2,
        gamma=args.gamma,
        loss_function="smooth_l1",
    )
    # Define Target Network Updater (Polyak averaging of target Q params)
    target_net_updater = SoftUpdate(loss_module, args.target_update_polyak)
    # Make Off-Policy Collector
    # NOTE(review): max_frames_per_traj is tied to frames_per_batch here —
    # confirm this coupling is intended rather than a separate config knob.
    collector = MultiaSyncDataCollector(
        create_env_fn=[train_env],
        policy=actor_model_explore,
        total_frames=args.total_frames,
        max_frames_per_traj=args.frames_per_batch,
        frames_per_batch=args.env_per_collector * args.frames_per_batch,
        init_random_frames=args.init_random_frames,
        reset_at_each_iter=False,
        postproc=None,
        split_trajs=True,
        devices=[device],  # device for execution
        passing_devices=[device],  # device where data will be stored and passed
        seed=None,
        pin_memory=False,
        update_at_each_batch=False,
        exploration_mode="random",
    )
    collector.set_seed(args.seed)
    # Make Replay Buffer
    replay_buffer = make_replay_buffer(
        prb=args.prb,
        buffer_size=args.buffer_size,
        buffer_scratch_dir=args.buffer_scratch_dir,
        device=device,
    )
    # Trajectory recorder for evaluation
    recorder = make_recorder(
        task=args.task,
        frame_skip=args.frame_skip,
        record_interval=args.record_interval,
        actor_model_explore=actor_model_explore,
        eval_traj=args.eval_traj,
        env_configs=env_configs,
    )
    # Optimizers: one Adam over actor+critic params plus the entropy temperature
    params = list(loss_module.parameters()) + [loss_module.log_alpha]
    optimizer_actor = optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
    rewards = []
    rewards_eval = []
    # Main loop
    target_net_updater.init_()
    collected_frames = 0
    episodes = 0
    pbar = tqdm.tqdm(total=args.total_frames)
    r0 = None  # first-batch mean reward, kept as a baseline for the pbar
    loss = None
    with wandb.init(project="SAC_TorchRL", name=args.exp_name, config=args):
        for i, tensordict in enumerate(collector):
            # update weights of the inference policy
            collector.update_policy_weights_()
            if r0 is None:
                r0 = tensordict["reward"].sum(-1).mean().item()
            pbar.update(tensordict.numel())
            # extend the replay buffer with the new data
            if "mask" in tensordict.keys():
                # if multi-step, a mask is present to help filter padded values
                current_frames = tensordict["mask"].sum()
                tensordict = tensordict[tensordict.get("mask").squeeze(-1)]
            else:
                tensordict = tensordict.view(-1)
                current_frames = tensordict.numel()
            collected_frames += current_frames
            # NOTE(review): counts one episode per env per batch — presumably
            # an approximation; confirm against actual "done" flags.
            episodes += args.env_per_collector
            replay_buffer.extend(tensordict.cpu())
            # optimization steps (skipped during the initial random-fill phase)
            if collected_frames >= args.init_random_frames:
                (
                    total_losses,
                    actor_losses,
                    q_losses,
                    alpha_losses,
                    alphas,
                    entropies,
                ) = ([], [], [], [], [], [])
                # utd_ratio = gradient updates per collected frame
                for _ in range(
                    args.env_per_collector * args.frames_per_batch * args.utd_ratio
                ):
                    # sample from replay buffer
                    sampled_tensordict = replay_buffer.sample(args.batch_size).clone()
                    loss_td = loss_module(sampled_tensordict)
                    actor_loss = loss_td["loss_actor"]
                    q_loss = loss_td["loss_qvalue"]
                    alpha_loss = loss_td["loss_alpha"]
                    # single optimizer: the three losses touch disjoint params
                    loss = actor_loss + q_loss + alpha_loss
                    optimizer_actor.zero_grad()
                    loss.backward()
                    optimizer_actor.step()
                    # update qnet_target params
                    target_net_updater.step()
                    # update priority
                    if args.prb:
                        replay_buffer.update_priority(sampled_tensordict)
                    total_losses.append(loss.item())
                    actor_losses.append(actor_loss.item())
                    q_losses.append(q_loss.item())
                    alpha_losses.append(alpha_loss.item())
                    alphas.append(loss_td["alpha"].item())
                    entropies.append(loss_td["entropy"].item())
            rewards.append(
                (i, tensordict["reward"].sum().item() / args.env_per_collector)
            )
            wandb.log(
                {
                    "train_reward": rewards[-1][1],
                    "collected_frames": collected_frames,
                    "episodes": episodes,
                }
            )
            if loss is not None:
                wandb.log(
                    {
                        "total_loss": np.mean(total_losses),
                        "actor_loss": np.mean(actor_losses),
                        "q_loss": np.mean(q_losses),
                        "alpha_loss": np.mean(alpha_losses),
                        "alpha": np.mean(alphas),
                        "entropy": np.mean(entropies),
                    }
                )
            # NOTE(review): recorder may return None off the record interval,
            # yet evaluate_success is called unconditionally — confirm that
            # recorder(None) always yields a record here.
            td_record = recorder(None)
            success_percentage = evaluate_success(
                env_success_fn=train_env.evaluate_success,
                td_record=td_record,
                eval_traj=args.eval_traj,
            )
            if td_record is not None:
                rewards_eval.append(
                    (
                        i,
                        td_record["total_r_evaluation"]
                        / 1,  # divide by number of eval worker
                    )
                )
                wandb.log({"test_reward": rewards_eval[-1][1]})
            wandb.log({"success": success_percentage})
            if len(rewards_eval):
                pbar.set_description(
                    f"reward: {rewards[-1][1]: 4.4f} (r0 = {r0: 4.4f}), test reward: {rewards_eval[-1][1]: 4.4f}"
                )
            # free the batch before the next collection to limit peak memory
            del tensordict
            gc.collect()
    collector.shutdown()
# CLI entry point: hydra parses config/sac.yaml plus overrides into `args`.
if __name__ == "__main__":
    main()
|
agenthive-dev
|
scripts/sac_mujoco/sac.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from numbers import Number
from typing import Union
import numpy as np
import torch
from tensordict.nn import TensorDictSequential
from tensordict.tensordict import TensorDict, TensorDictBase
from torch import Tensor
from torchrl.envs.utils import set_exploration_mode, step_mdp
from torchrl.modules import SafeModule
from torchrl.objectives.common import LossModule
from torchrl.objectives.utils import (
distance_loss,
next_state_value as get_next_state_value,
)
# functorch provides the vmap used to batch actor/Q-network evaluations;
# record the failure reason so SACLoss.__init__ can raise a clear error.
try:
    from functorch import vmap
    FUNCTORCH_ERR = ""
    _has_functorch = True
except ImportError as err:
    FUNCTORCH_ERR = str(err)
    _has_functorch = False
class SACLoss(LossModule):
    """SAC Loss module.
    Args:
        actor_network (SafeModule): the actor to be trained
        qvalue_network (SafeModule): a single Q-value network that will be multiplied as many times as needed.
        num_qvalue_nets (int, optional): Number of Q-value networks to be trained. Default is 2.
        gamma (Number, optional): gamma decay factor. Default is 0.99.
        priotity_key (str, optional): Key where to write the priority value for prioritized replay buffers. Default is
            `"td_error"`. (Parameter name is a typo kept for compatibility; it is
            stored as ``self.priority_key``.)
        loss_function (str, optional): loss function to be used for the Q-value. Can be one of `"smooth_l1"`, "l2",
            "l1", Default is "smooth_l1".
        alpha_init (float, optional): initial entropy multiplier.
            Default is 1.0.
        min_alpha (float, optional): min value of alpha.
            Default is 0.1.
        max_alpha (float, optional): max value of alpha.
            Default is 10.0.
        fixed_alpha (bool, optional): whether alpha should be trained to match a target entropy. Default is :obj:`False`.
        target_entropy (Union[str, Number], optional): Target entropy for the stochastic policy. Default is "auto".
        delay_qvalue (bool, optional): Whether to separate the target Q value networks from the Q value networks used
            for data collection. Default is :obj:`True`.
        gSDE (bool, optional): Knowing if gSDE is used is necessary to create random noise variables.
            Default is False
    """
    # No target copy of the actor parameters is kept (never enabled here).
    delay_actor: bool = False
    # Toggle between the explicit (readable) and vectorized (fast) forward.
    _explicit: bool = True
    def __init__(
        self,
        actor_network: SafeModule,
        qvalue_network: SafeModule,
        num_qvalue_nets: int = 2,
        gamma: Number = 0.99,
        priotity_key: str = "td_error",
        loss_function: str = "smooth_l1",
        alpha_init: float = 1.0,
        min_alpha: float = 0.1,
        max_alpha: float = 10.0,
        fixed_alpha: bool = False,
        target_entropy: Union[str, Number] = "auto",
        delay_qvalue: bool = True,
        gSDE: bool = False,
    ):
        if not _has_functorch:
            raise ImportError(
                f"Failed to import functorch with error message:\n{FUNCTORCH_ERR}"
            )
        super().__init__()
        # Turn the actor into a functional module so its params can be
        # swapped (current vs. target) at call time.
        self.convert_to_functional(
            actor_network,
            "actor_network",
            create_target_params=self.delay_actor,
            funs_to_decorate=["forward", "get_dist_params"],
        )
        # let's make sure that actor_network has `return_log_prob` set to True
        self.actor_network.return_log_prob = True
        self.delay_qvalue = delay_qvalue
        self.convert_to_functional(
            qvalue_network,
            "qvalue_network",
            num_qvalue_nets,
            create_target_params=self.delay_qvalue,
            # avoid double-registering parameters shared with the actor
            compare_against=list(actor_network.parameters()),
        )
        self.num_qvalue_nets = num_qvalue_nets
        self.register_buffer("gamma", torch.tensor(gamma))
        self.priority_key = priotity_key
        self.loss_function = loss_function
        try:
            device = next(self.parameters()).device
        except AttributeError:
            # parameter-less module: fall back to CPU
            device = torch.device("cpu")
        self.register_buffer("alpha_init", torch.tensor(alpha_init, device=device))
        # alpha is clamped in log-space; see the `alpha` property
        self.register_buffer(
            "min_log_alpha", torch.tensor(min_alpha, device=device).log()
        )
        self.register_buffer(
            "max_log_alpha", torch.tensor(max_alpha, device=device).log()
        )
        self.fixed_alpha = fixed_alpha
        if fixed_alpha:
            # non-trainable temperature
            self.register_buffer(
                "log_alpha", torch.tensor(math.log(alpha_init), device=device)
            )
        else:
            # trainable temperature (optimized via _loss_alpha)
            self.register_parameter(
                "log_alpha",
                torch.nn.Parameter(torch.tensor(math.log(alpha_init), device=device)),
            )
        if target_entropy == "auto":
            if actor_network.spec["action"] is None:
                raise RuntimeError(
                    "Cannot infer the dimensionality of the action. Consider providing "
                    "the target entropy explicitely or provide the spec of the "
                    "action tensor in the actor network."
                )
            # standard SAC heuristic: -|A|
            target_entropy = -float(np.prod(actor_network.spec["action"].shape))
        self.register_buffer(
            "target_entropy", torch.tensor(target_entropy, device=device)
        )
        self.gSDE = gSDE
    @property
    def alpha(self):
        """Current entropy temperature (detached); clamps log_alpha in place."""
        self.log_alpha.data.clamp_(self.min_log_alpha, self.max_log_alpha)
        with torch.no_grad():
            alpha = self.log_alpha.exp()
        return alpha
    def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
        """Compute the SAC losses; dispatches on the `_explicit` flag."""
        if self._explicit:
            # slow but explicit version
            return self._forward_explicit(tensordict)
        else:
            return self._forward_vectorized(tensordict)
    def _loss_alpha(self, log_pi: Tensor) -> Tensor:
        """Temperature loss driving the policy entropy toward target_entropy."""
        if torch.is_grad_enabled() and not log_pi.requires_grad:
            raise RuntimeError(
                "expected log_pi to require gradient for the alpha loss)"
            )
        if self.target_entropy is not None:
            # we can compute this loss even if log_alpha is not a parameter
            alpha_loss = -self.log_alpha.exp() * (log_pi.detach() + self.target_entropy)
        else:
            # placeholder
            alpha_loss = torch.zeros_like(log_pi)
        return alpha_loss
    def _forward_vectorized(self, tensordict: TensorDictBase) -> TensorDictBase:
        """Single-pass implementation: all actor and Q evaluations are batched
        with vmap over stacked parameter sets, then split back out."""
        obs_keys = self.actor_network.in_keys
        tensordict_select = tensordict.select(
            "reward", "done", "next", *obs_keys, "action"
        )
        # Row 0: current actor params, row 1: target actor params — evaluated
        # together in one vmapped call below.
        actor_params = torch.stack(
            [self.actor_network_params, self.target_actor_network_params], 0
        )
        tensordict_actor_grad = tensordict_select.select(
            *obs_keys
        )  # to avoid overwriting keys
        next_td_actor = step_mdp(tensordict_select).select(
            *self.actor_network.in_keys
        )  # next_observation ->
        # Row 0: current obs (actor loss), row 1: next obs (target value).
        tensordict_actor = torch.stack([tensordict_actor_grad, next_td_actor], 0)
        tensordict_actor = tensordict_actor.contiguous()
        with set_exploration_mode("random"):
            if self.gSDE:
                tensordict_actor.set(
                    "_eps_gSDE",
                    torch.zeros(tensordict_actor.shape, device=tensordict_actor.device),
                )
            # vmap doesn't support sampling, so we take it out from the vmap
            td_params = vmap(self.actor_network.get_dist_params)(
                tensordict_actor,
                actor_params,
            )
            if isinstance(self.actor_network, TensorDictSequential):
                sample_key = self.actor_network[-1].out_keys[0]
                tensordict_actor_dist = self.actor_network.build_dist_from_params(
                    td_params
                )
            else:
                sample_key = self.actor_network.out_keys[0]
                tensordict_actor_dist = self.actor_network.build_dist_from_params(
                    td_params
                )
            # reparameterized sample + its log-prob for both rows at once
            tensordict_actor[sample_key] = self._rsample(tensordict_actor_dist)
            tensordict_actor["sample_log_prob"] = tensordict_actor_dist.log_prob(
                tensordict_actor[sample_key]
            )
        # repeat tensordict_actor to match the qvalue size
        _actor_loss_td = (
            tensordict_actor[0]
            .select(*self.qvalue_network.in_keys)
            .expand(self.num_qvalue_nets, *tensordict_actor[0].batch_size)
        )  # for actor loss
        _qval_td = tensordict_select.select(*self.qvalue_network.in_keys).expand(
            self.num_qvalue_nets,
            *tensordict_select.select(*self.qvalue_network.in_keys).batch_size,
        )  # for qvalue loss
        _next_val_td = (
            tensordict_actor[1]
            .select(*self.qvalue_network.in_keys)
            .expand(self.num_qvalue_nets, *tensordict_actor[1].batch_size)
        )  # for next value estimation
        # 3 * num_qvalue_nets rows, in the same order as qvalue_params below:
        # [actor-loss eval | next-value eval | TD-fit eval]
        tensordict_qval = torch.cat(
            [
                _actor_loss_td,
                _next_val_td,
                _qval_td,
            ],
            0,
        )
        # cat params: detached (actor loss), target (next value), live (TD fit)
        q_params_detach = self.qvalue_network_params.detach()
        qvalue_params = torch.cat(
            [
                q_params_detach,
                self.target_qvalue_network_params,
                self.qvalue_network_params,
            ],
            0,
        )
        tensordict_qval = vmap(self.qvalue_network)(
            tensordict_qval,
            qvalue_params,
        )
        state_action_value = tensordict_qval.get("state_action_value").squeeze(-1)
        (
            state_action_value_actor,
            next_state_action_value_qvalue,
            state_action_value_qvalue,
        ) = state_action_value.split(
            [self.num_qvalue_nets, self.num_qvalue_nets, self.num_qvalue_nets],
            dim=0,
        )
        sample_log_prob = tensordict_actor.get("sample_log_prob").squeeze(-1)
        (
            action_log_prob_actor,
            next_action_log_prob_qvalue,
        ) = sample_log_prob.unbind(0)
        # E[alpha * log_pi(a) - Q(s, a)] where a is reparameterized
        loss_actor = -(
            state_action_value_actor.min(0)[0] - self.alpha * action_log_prob_actor
        ).mean()
        # soft value of the next state: min over target Q nets, entropy-corrected
        next_state_value = (
            next_state_action_value_qvalue.min(0)[0]
            - self.alpha * next_action_log_prob_qvalue
        )
        target_value = get_next_state_value(
            tensordict,
            gamma=self.gamma,
            pred_next_val=next_state_value,
        )
        pred_val = state_action_value_qvalue
        td_error = (pred_val - target_value).pow(2)
        loss_qval = (
            distance_loss(
                pred_val,
                target_value.expand_as(pred_val),
                loss_function=self.loss_function,
            )
            .mean(-1)
            .sum()
            * 0.5
        )
        # write the priority signal back for prioritized replay buffers
        tensordict.set("td_error", td_error.detach().max(0)[0])
        loss_alpha = self._loss_alpha(sample_log_prob)
        if not loss_qval.shape == loss_actor.shape:
            raise RuntimeError(
                f"QVal and actor loss have different shape: {loss_qval.shape} and {loss_actor.shape}"
            )
        td_out = TensorDict(
            {
                "loss_actor": loss_actor.mean(),
                "loss_qvalue": loss_qval.mean(),
                "loss_alpha": loss_alpha.mean(),
                "alpha": self.alpha.detach(),
                "entropy": -sample_log_prob.mean().detach(),
                "state_action_value_actor": state_action_value_actor.mean().detach(),
                "action_log_prob_actor": action_log_prob_actor.mean().detach(),
                "next.state_value": next_state_value.mean().detach(),
                "target_value": target_value.mean().detach(),
            },
            [],
        )
        return td_out
    def _forward_explicit(self, tensordict: TensorDictBase) -> TensorDictBase:
        """Readable reference implementation: actor and Q losses computed in
        separate helper calls instead of one batched vmap pass."""
        loss_actor, sample_log_prob = self._loss_actor_explicit(tensordict.clone(False))
        loss_qval, td_error = self._loss_qval_explicit(tensordict.clone(False))
        tensordict.set("td_error", td_error.detach().max(0)[0])
        loss_alpha = self._loss_alpha(sample_log_prob)
        td_out = TensorDict(
            {
                "loss_actor": loss_actor.mean(),
                "loss_qvalue": loss_qval.mean(),
                "loss_alpha": loss_alpha.mean(),
                "alpha": self.alpha.detach(),
                "entropy": -sample_log_prob.mean().detach(),
                # "state_action_value_actor": state_action_value_actor.mean().detach(),
                # "action_log_prob_actor": action_log_prob_actor.mean().detach(),
                # "next.state_value": next_state_value.mean().detach(),
                # "target_value": target_value.mean().detach(),
            },
            [],
        )
        return td_out
    def _rsample(
        self,
        dist,
    ):
        # separated only for the purpose of making the sampling
        # deterministic to compare methods
        return dist.rsample()
    def _sample_reparam(self, tensordict, params):
        """Given a policy param batch and input data in a tensordict, writes a reparam sample and log-prob key."""
        with set_exploration_mode("random"):
            if self.gSDE:
                raise NotImplementedError
            # vmap doesn't support sampling, so we take it out from the vmap
            td_params = self.actor_network.get_dist_params(
                tensordict,
                params,
            )
            if isinstance(self.actor_network, TensorDictSequential):
                sample_key = self.actor_network[-1].out_keys[0]
                tensordict_actor_dist = self.actor_network.build_dist_from_params(
                    td_params
                )
            else:
                sample_key = self.actor_network.out_keys[0]
                tensordict_actor_dist = self.actor_network.build_dist_from_params(
                    td_params
                )
            tensordict[sample_key] = self._rsample(tensordict_actor_dist)
            tensordict["sample_log_prob"] = tensordict_actor_dist.log_prob(
                tensordict[sample_key]
            )
        return tensordict
    def _loss_actor_explicit(self, tensordict):
        """Actor loss E[alpha*log_pi(a|s) - min_i Q_i(s,a)] with detached Q params."""
        tensordict_actor = tensordict.clone(False)
        actor_params = self.actor_network_params
        tensordict_actor = self._sample_reparam(tensordict_actor, actor_params)
        action_log_prob_actor = tensordict_actor["sample_log_prob"]
        tensordict_qval = tensordict_actor.select(*self.qvalue_network.in_keys).expand(
            self.num_qvalue_nets, *tensordict_actor.batch_size
        )  # for actor loss
        # detach so the actor loss does not backprop into the critics
        qvalue_params = self.qvalue_network_params.detach()
        tensordict_qval = vmap(self.qvalue_network)(
            tensordict_qval,
            qvalue_params,
        )
        state_action_value_actor = tensordict_qval.get("state_action_value").squeeze(-1)
        state_action_value_actor = state_action_value_actor.min(0)[0]
        # E[alpha * log_pi(a) - Q(s, a)] where a is reparameterized
        loss_actor = (
            self.alpha * action_log_prob_actor - state_action_value_actor
        ).mean()
        return loss_actor, action_log_prob_actor
    def _loss_qval_explicit(self, tensordict):
        """Critic loss: fit each Q net to the entropy-corrected TD target built
        from the target actor and target Q networks."""
        next_tensordict = step_mdp(tensordict)
        next_tensordict = self._sample_reparam(
            next_tensordict, self.target_actor_network_params
        )
        next_action_log_prob_qvalue = next_tensordict["sample_log_prob"]
        next_state_action_value_qvalue = vmap(self.qvalue_network, (None, 0))(
            next_tensordict,
            self.target_qvalue_network_params,
        )["state_action_value"].squeeze(-1)
        next_state_value = (
            next_state_action_value_qvalue.min(0)[0]
            - self.alpha * next_action_log_prob_qvalue
        )
        pred_val = vmap(self.qvalue_network, (None, 0))(
            tensordict,
            self.qvalue_network_params,
        )["state_action_value"].squeeze(-1)
        target_value = get_next_state_value(
            tensordict,
            gamma=self.gamma,
            pred_next_val=next_state_value,
        )
        # 1/2 * E[Q(s,a) - (r + gamma * (Q(s,a)-alpha log pi(s, a)))
        loss_qval = (
            distance_loss(
                pred_val,
                target_value.expand_as(pred_val),
                loss_function=self.loss_function,
            )
            .mean(-1)
            .sum()
            * 0.5
        )
        td_error = (pred_val - target_value).pow(2)
        return loss_qval, td_error
# Self-test: checks that the vectorized forward matches the explicit one.
if __name__ == "__main__":
    from tensordict.nn import TensorDictModule
    from torch import nn
    from torchrl.data import BoundedTensorSpec
    # Tests the vectorized version of SAC-v2 against plain implementation
    from torchrl.modules import ProbabilisticActor, ValueOperator
    from torchrl.modules.distributions import TanhNormal
    torch.manual_seed(0)
    action_spec = BoundedTensorSpec(-1, 1, shape=(3,))
    # Minimal actor backbone: one linear layer split into (loc, scale).
    class Splitter(nn.Linear):
        def forward(self, x):
            loc, scale = super().forward(x).chunk(2, -1)
            return loc, scale.exp()
    actor_module = TensorDictModule(
        Splitter(6, 6), in_keys=["obs"], out_keys=["loc", "scale"]
    )
    actor = ProbabilisticActor(
        spec=action_spec,
        in_keys=["loc", "scale"],
        module=actor_module,
        distribution_class=TanhNormal,
        default_interaction_mode="random",
        return_log_prob=False,
    )
    # Minimal critic: linear layer over the concatenated (state, action).
    class QVal(nn.Linear):
        def forward(self, s: Tensor, a: Tensor) -> Tensor:
            return super().forward(torch.cat([s, a], -1))
    qvalue = ValueOperator(QVal(9, 1), in_keys=["obs", "action"])
    # Monkeypatch _rsample to a deterministic constant so the explicit and
    # vectorized code paths see identical "samples" and can be compared.
    _rsample_old = SACLoss._rsample
    def _rsample_new(self, dist):
        return torch.ones_like(_rsample_old(self, dist))
    SACLoss._rsample = _rsample_new
    loss = SACLoss(actor, qvalue)
    # exercise both an unbatched and a (2, 3)-batched input
    for batch in ((), (2, 3)):
        td_input = TensorDict(
            {
                "obs": torch.rand(*batch, 6),
                "action": torch.rand(*batch, 3).clamp(-1, 1),
                "next": {"obs": torch.rand(*batch, 6)},
                "reward": torch.rand(*batch, 1),
                "done": torch.zeros(*batch, 1, dtype=torch.bool),
            },
            batch,
        )
        loss._explicit = True
        loss0 = loss(td_input)
        loss._explicit = False
        loss1 = loss(td_input)
        # differences should be ~0 if both implementations agree
        print("a", loss0["loss_actor"] - loss1["loss_actor"])
        print("q", loss0["loss_qvalue"] - loss1["loss_qvalue"])
|
agenthive-dev
|
scripts/sac_mujoco/sac_loss.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import dataclasses
import hydra
import torch.cuda
from hydra.core.config_store import ConfigStore
from rlhive.rl_envs import RoboHiveEnv
from torchrl.envs import (
CatTensors,
DoubleToFloat,
EnvCreator,
ObservationNorm,
ParallelEnv,
R3MTransform,
SelectTransform,
TransformedEnv,
)
from torchrl.envs.transforms import Compose, FlattenObservation, RewardScaling
from torchrl.envs.utils import set_exploration_mode
from torchrl.modules import OrnsteinUhlenbeckProcessWrapper
from torchrl.record import VideoRecorder
from torchrl.trainers.helpers.collectors import (
make_collector_offpolicy,
OffPolicyCollectorConfig,
)
from torchrl.trainers.helpers.envs import (
EnvConfig,
initialize_observation_norm_transforms,
retrieve_observation_norms_state_dict,
)
from torchrl.trainers.helpers.logger import LoggerConfig
from torchrl.trainers.helpers.losses import LossConfig, make_redq_loss
from torchrl.trainers.helpers.models import make_redq_model, REDQModelConfig
from torchrl.trainers.helpers.replay_buffer import make_replay_buffer, ReplayArgsConfig
from torchrl.trainers.helpers.trainers import make_trainer, TrainerConfig
from torchrl.trainers.loggers.utils import generate_exp_name, get_logger
def make_env(
    task,
    reward_scaling,
    device,
    obs_norm_state_dict=None,
    action_dim_gsde=None,
    state_dim_gsde=None,
):
    """Create a transformed RoboHive environment.

    Optionally restores observation-normalization statistics and appends a
    gSDE exploration-noise transform when its dimensions are provided.
    """
    env = make_transformed_env(
        env=RoboHiveEnv(task, device=device), reward_scaling=reward_scaling
    )
    if obs_norm_state_dict is not None:
        env.append_transform(
            ObservationNorm(**obs_norm_state_dict, in_keys=["observation_vector"])
        )
    if action_dim_gsde is not None:
        # NOTE(review): gSDENoise is not imported in this module, so this
        # branch would raise NameError — confirm the intended torchrl import.
        env.append_transform(
            gSDENoise(action_dim=action_dim_gsde, state_dim=state_dim_gsde)
        )
    return env
def make_transformed_env(
    env,
    reward_scaling=5.0,
    stats=None,
):
    """Wrap ``env`` with the visual + state pipeline used by this script.

    The append order matters: pixels are embedded with R3M and flattened,
    rewards are rescaled, the embedding and proprioceptive observation are
    concatenated into ``observation_vector``, which is then normalized and
    cast from double to float.
    """
    env = TransformedEnv(env, SelectTransform("solved", "pixels", "observation"))
    # R3M must be composed with FlattenObservation;
    # see https://github.com/pytorch/rl/issues/802
    env.append_transform(
        Compose(
            R3MTransform("resnet50", in_keys=["pixels"], download=True),
            FlattenObservation(-2, -1, in_keys=["r3m_vec"]),
        )
    )
    env.append_transform(RewardScaling(loc=0.0, scale=reward_scaling))
    out_key = "observation_vector"
    env.append_transform(
        CatTensors(in_keys=["r3m_vec", "observation"], out_key=out_key)
    )
    # state normalization: identity stats unless the caller supplies some
    norm_kwargs = {"loc": 0.0, "scale": 1.0} if stats is None else stats
    env.append_transform(
        ObservationNorm(**norm_kwargs, in_keys=[out_key], standard_normal=True)
    )
    env.append_transform(DoubleToFloat(in_keys=[out_key], in_keys_inv=[]))
    return env
# Flatten the dataclass fields of all torchrl helper configs into a single
# hydra Config schema registered under the name "config".
config_fields = [
    (config_field.name, config_field.type, config_field)
    for config_cls in (
        TrainerConfig,
        OffPolicyCollectorConfig,
        EnvConfig,
        LossConfig,
        REDQModelConfig,
        LoggerConfig,
        ReplayArgsConfig,
    )
    for config_field in dataclasses.fields(config_cls)
]
Config = dataclasses.make_dataclass(cls_name="Config", fields=config_fields)
cs = ConfigStore.instance()
cs.store(name="config", node=Config)
# Conventional per-task reward scales.
# NOTE(review): not referenced in this script — presumably consumed via the
# config elsewhere; confirm before removing.
DEFAULT_REWARD_SCALING = {
    "Hopper-v1": 5,
    "Walker2d-v1": 5,
    "HalfCheetah-v1": 5,
    "cheetah": 5,
    "Ant-v2": 5,
    "Humanoid-v2": 20,
    "humanoid": 100,
}
@hydra.main(version_base=None, config_path=".", config_name="config")
def main(cfg: "DictConfig"):  # noqa: F821
    """Train REDQ on a RoboHive task using the torchrl trainer helpers.

    Builds the proof/training/recorder environments, the REDQ model and loss,
    an off-policy collector and replay buffer, then delegates the training
    loop to ``make_trainer(...).train()``.

    Returns:
        Tuple of (logger.log_dir, trainer._log_dict).
    """
    # default to the first GPU whenever any GPU is visible
    device = (
        torch.device("cpu")
        if torch.cuda.device_count() == 0
        else torch.device("cuda:0")
    )
    exp_name = generate_exp_name("REDQ", cfg.exp_name)
    logger = get_logger(
        logger_type=cfg.logger, logger_name="redq_logging", experiment_name=exp_name
    )
    # observation-normalization bootstrap settings (disabled under vecnorm)
    key, init_env_steps = None, None
    if not cfg.vecnorm and cfg.norm_stats:
        if not hasattr(cfg, "init_env_steps"):
            raise AttributeError("init_env_steps missing from arguments.")
        key = ("next", "observation_vector")
        init_env_steps = cfg.init_env_steps
    # throwaway env used to infer shapes and normalization statistics
    proof_env = make_env(
        task=cfg.env_name,
        reward_scaling=cfg.reward_scaling,
        device=device,
    )
    initialize_observation_norm_transforms(
        proof_environment=proof_env, num_iter=init_env_steps, key=key
    )
    _, obs_norm_state_dict = retrieve_observation_norms_state_dict(proof_env)[0]
    print(proof_env)
    model = make_redq_model(
        proof_env,
        cfg=cfg,
        device=device,
        in_keys=["observation_vector"],
    )
    loss_module, target_net_updater = make_redq_loss(model, cfg)
    actor_model_explore = model[0]
    if cfg.ou_exploration:
        if cfg.gSDE:
            raise RuntimeError("gSDE and ou_exploration are incompatible")
        # wrap the actor with Ornstein-Uhlenbeck exploration noise
        actor_model_explore = OrnsteinUhlenbeckProcessWrapper(
            actor_model_explore,
            annealing_num_steps=cfg.annealing_frames,
            sigma=cfg.ou_sigma,
            theta=cfg.ou_theta,
        ).to(device)
    if device == torch.device("cpu"):
        # mostly for debugging
        actor_model_explore.share_memory()
    if cfg.gSDE:
        with torch.no_grad(), set_exploration_mode("random"):
            # get dimensions to build the parallel env
            proof_td = actor_model_explore(proof_env.reset().to(device))
        action_dim_gsde, state_dim_gsde = proof_td.get("_eps_gSDE").shape[-2:]
        del proof_td
    else:
        action_dim_gsde, state_dim_gsde = None, None
    proof_env.close()
    create_env_fn = make_env(  # Pass EnvBase instead of the create_env_fn
        task=cfg.env_name,
        reward_scaling=cfg.reward_scaling,
        device=device,
        obs_norm_state_dict=obs_norm_state_dict,
        action_dim_gsde=action_dim_gsde,
        state_dim_gsde=state_dim_gsde,
    )
    collector = make_collector_offpolicy(
        make_env=create_env_fn,
        actor_model_explore=actor_model_explore,
        cfg=cfg,
        # make_env_kwargs=[
        #     {"device": device} if device >= 0 else {}
        #     for device in args.env_rendering_devices
        # ],
    )
    replay_buffer = make_replay_buffer(device, cfg)
    # recorder = transformed_env_constructor(
    #     cfg,
    #     video_tag=video_tag,
    #     norm_obs_only=True,
    #     obs_norm_state_dict=obs_norm_state_dict,
    #     logger=logger,
    #     use_env_creator=False,
    # )()
    # separate evaluation env, sharing the training normalization stats
    recorder = make_env(
        task=cfg.env_name,
        reward_scaling=cfg.reward_scaling,
        device=device,
        obs_norm_state_dict=obs_norm_state_dict,
        action_dim_gsde=action_dim_gsde,
        state_dim_gsde=state_dim_gsde,
    )
    # remove video recorder from recorder to have matching state_dict keys
    if cfg.record_video:
        recorder_rm = TransformedEnv(recorder.base_env)
        for transform in recorder.transform:
            if not isinstance(transform, VideoRecorder):
                recorder_rm.append_transform(transform.clone())
    else:
        recorder_rm = recorder
    # sync the recorder's transform state with the training env's
    if isinstance(create_env_fn, ParallelEnv):
        recorder_rm.load_state_dict(create_env_fn.state_dict()["worker0"])
        create_env_fn.close()
    elif isinstance(create_env_fn, EnvCreator):
        recorder_rm.load_state_dict(create_env_fn().state_dict())
    else:
        recorder_rm.load_state_dict(create_env_fn.state_dict())
    # reset reward scaling so evaluation rewards are reported unscaled
    for t in recorder.transform:
        if isinstance(t, RewardScaling):
            t.scale.fill_(1.0)
            t.loc.fill_(0.0)
    trainer = make_trainer(
        collector,
        loss_module,
        recorder,
        target_net_updater,
        actor_model_explore,
        replay_buffer,
        logger,
        cfg,
    )
    final_seed = collector.set_seed(cfg.seed)
    print(f"init seed: {cfg.seed}, final seed: {final_seed}")
    trainer.train()
    return (logger.log_dir, trainer._log_dict)
# CLI entry point: hydra builds `cfg` from the registered "config" schema.
if __name__ == "__main__":
    main()
|
agenthive-dev
|
scripts/redq/redq.py
|
"""
This is a job script for running policy gradient algorithms on gym tasks.
Separate job scripts are provided to run few other algorithms
- For DAPG see here: https://github.com/aravindr93/hand_dapg/tree/master/dapg/examples
- For model-based NPG see here: https://github.com/aravindr93/mjrl/tree/master/mjrl/algos/model_accel
"""
from mjrl.utils.gym_env import GymEnv
from mjrl.policies.gaussian_mlp import MLP
from mjrl.baselines.mlp_baseline import MLPBaseline
from mjrl.algos.npg_cg import NPG
from mjrl.algos.batch_reinforce import BatchREINFORCE
from mjrl.algos.ppo_clip import PPO
from mjrl.utils.train_agent import train_agent
from mjrl.utils.logger import DataLog
from omegaconf import open_dict
import os
import json
import gym
# import mjrl.envs
import time as timer
import robohive
from robohive.envs.env_variants import register_env_variant
def train_loop(job_data) -> None:
    """Train a policy-gradient agent (NPG/VPG/NVPG/PPO) on the configured task.

    Args:
        job_data: hydra/omegaconf config carrying the env name, seed,
            algorithm choice, network sizes and training hyper-parameters
            (see the accompanying launcher script).

    Raises:
        NotImplementedError: if ``job_data.algorithm`` is not one of
            'NPG', 'VPG', 'NVPG', 'PPO'.
    """
    if 'env_hyper_params' in job_data.keys():
        job_data.env = register_env_variant(job_data.env, job_data.env_hyper_params)
    e = GymEnv(job_data.env)
    # Sizes arrive as strings such as "(64, 64)". NOTE: eval on config input —
    # acceptable only because configs are trusted; do not feed untrusted data.
    policy_size = tuple(eval(job_data.policy_size))
    vf_hidden_size = tuple(eval(job_data.vf_hidden_size))
    policy = MLP(e.spec, hidden_sizes=policy_size, seed=job_data.seed,
                 init_log_std=job_data.init_log_std, min_log_std=job_data.min_log_std)
    baseline = MLPBaseline(e.spec, reg_coef=1e-3, batch_size=job_data.vf_batch_size, hidden_sizes=vf_hidden_size,
                           epochs=job_data.vf_epochs, learn_rate=job_data.vf_learn_rate)
    # Construct the algorithm
    if job_data.algorithm == 'NPG':
        # Other hyperparameters (like number of CG steps) can be specified in config for pass through
        # or default hyperparameters will be used
        agent = NPG(e, policy, baseline, normalized_step_size=job_data.rl_step_size,
                    seed=job_data.seed, save_logs=True, **job_data.alg_hyper_params)
    elif job_data.algorithm == 'VPG':
        agent = BatchREINFORCE(e, policy, baseline, learn_rate=job_data.rl_step_size,
                               seed=job_data.seed, save_logs=True, **job_data.alg_hyper_params)
    elif job_data.algorithm == 'NVPG':
        agent = BatchREINFORCE(e, policy, baseline, desired_kl=job_data.rl_step_size,
                               seed=job_data.seed, save_logs=True, **job_data.alg_hyper_params)
    elif job_data.algorithm == 'PPO':
        # There are many hyperparameters for PPO. They can be specified in config for pass through
        # or defaults in the PPO algorithm will be used
        agent = PPO(e, policy, baseline, save_logs=True, **job_data.alg_hyper_params)
    else:
        # BUG FIX: the exception was previously constructed but never raised,
        # so an unknown algorithm fell through to a NameError on `agent`.
        raise NotImplementedError("Algorithm not found")
    # Update logger if WandB in Config
    if 'wandb_params' in job_data.keys() and job_data['wandb_params']['use_wandb'] == True:
        # Default the wandb log dir to the current (hydra run) directory when
        # the config does not specify one.
        if 'wandb_logdir' not in job_data['wandb_params']:
            with open_dict(job_data):
                job_data.wandb_params.wandb_logdir = os.getcwd()
        agent.logger = DataLog(**job_data['wandb_params'], wandb_config=job_data)
    print("========================================")
    print("Starting policy learning")
    print("========================================")
    ts = timer.time()
    train_agent(job_name='.',
                agent=agent,
                seed=job_data.seed,
                niter=job_data.rl_num_iter,
                gamma=job_data.rl_gamma,
                gae_lambda=job_data.rl_gae,
                num_cpu=job_data.num_cpu,
                sample_mode=job_data.sample_mode,
                num_traj=job_data.rl_num_traj,
                num_samples=job_data.rl_num_samples,
                save_freq=job_data.save_freq,
                evaluation_rollouts=job_data.eval_rollouts)
    print("========================================")
    print("Job Finished. Time taken = %f" % (timer.time()-ts))
    print("========================================")
|
agenthive-dev
|
baselines/mjrl/mjrl_job_script.py
|
"""
This is a launcher script for launching mjrl training using hydra
"""
import os
import time as timer
import hydra
from omegaconf import DictConfig, OmegaConf
from mjrl_job_script import train_loop
# ===============================================================================
# Process Inputs and configure job
# ===============================================================================
@hydra.main(config_name="hydra_npg_config", config_path="config")
def configure_jobs(job_data):
    """Validate the hydra config, persist it to ``job_config.yaml``, and
    hand off to :func:`train_loop`.

    Args:
        job_data: omegaconf DictConfig assembled by hydra.
    """
    print("========================================")
    print("Job Configuration")
    print("========================================")
    OmegaConf.resolve(job_data)  # resolve configs
    # Config sanity checks — direct membership tests instead of any([...]) scans.
    assert 'algorithm' in job_data.keys()
    assert job_data.algorithm in ('NPG', 'NVPG', 'VPG', 'PPO')
    assert 'sample_mode' in job_data.keys()
    assert job_data.sample_mode in ('samples', 'trajectories')
    job_data.alg_hyper_params = dict() if 'alg_hyper_params' not in job_data.keys() else job_data.alg_hyper_params
    # Persist the resolved config next to the job outputs.
    with open('job_config.yaml', 'w') as fp:
        OmegaConf.save(config=job_data, f=fp.name)
    if job_data.sample_mode == 'trajectories':
        assert 'rl_num_traj' in job_data.keys()
        job_data.rl_num_samples = 0  # will be ignored
    elif job_data.sample_mode == 'samples':
        assert 'rl_num_samples' in job_data.keys()
        job_data.rl_num_traj = 0  # will be ignored
    else:
        # unreachable unless asserts are stripped (-O); kept as a hard guard
        print("Unknown sampling mode. Choose either trajectories or samples")
        exit()
    print(OmegaConf.to_yaml(job_data, resolve=True))
    train_loop(job_data)
# Hydra entry point: config is assembled from config/hydra_npg_config and
# passed to configure_jobs by the @hydra.main decorator.
if __name__ == "__main__":
    configure_jobs()
|
agenthive-dev
|
baselines/mjrl/hydra_mjrl_launcher.py
|
import robohive
import click
# Help text for the click command below. The previous text ("Script to
# render trajectories embeded in the env"" — with a typo and a stray quote)
# was copy-pasted from a different tool; this script prints the hydra/mjrl
# training command for a robohive suite.
DESC = """
Script to print the hydra/mjrl training command for a robohive suite
"""
@click.command(help=DESC)
@click.option('-s', '--suite', type=str, help='environment suite to train', default="arms")
@click.option('-l', '--launcher', type=click.Choice(['', None, "local", "slurm"]), default='')
@click.option('-cn', '--config_name', type=str, default=None)
@click.option('-cp', '--config_path', type=str, default='config')
def get_train_cmd(suite, launcher, config_name, config_path):
    """Print the hydra/mjrl command line for training NPG on a robohive suite.

    suite selects the robohive env collection; config_name falls back to the
    suite's standard hydra config when not given; an empty/None launcher means
    a plain local run, otherwise a hydra multirun spec is emitted.
    """
    # Suite name -> (robohive attribute holding the env list, default config).
    # Attribute names are kept as strings so only the selected suite's
    # attribute is ever touched, matching the original if/elif chain.
    # NOTE(review): the "multitask_" key (trailing underscore) is preserved
    # from the original dispatch — confirm whether plain "multitask" was meant.
    suite_table = {
        "multitask_": ("robohive_multitask_suite", "hydra_kitchen_config.yaml"),
        "arms": ("robohive_arm_suite", "hydra_arms_config.yaml"),
        "hands": ("robohive_hand_suite", "hydra_hand_config.yaml"),
        "quads": ("robohive_quad_suite", "hydra_quads_config.yaml"),
        "myobase": ("robohive_myobase_suite", "hydra_myo_config.yaml"),
        "myochallenge": ("robohive_myochal_suite", "hydra_myo_config.yaml"),
        "myodm": ("robohive_myodm_suite", "hydra_myo_config.yaml"),
    }
    # Resolve Suite
    if suite not in suite_table:
        raise ValueError(f"Unsupported suite:{suite}")
    env_attr, default_config = suite_table[suite]
    envs = ",".join(getattr(robohive, env_attr))
    if config_name is None:
        config_name = default_config
    # Resolve launcher
    if launcher == '' or launcher is None:
        launcher_spec = ''
    else:
        launcher_spec = f"--multirun hydra/output={launcher} hydra/launcher={launcher}"
    # Get final training command
    print(f"To train NPG via mjrl on {suite} suite, run the following command: ")
    print(f"python hydra_mjrl_launcher.py --config-path {config_path} --config-name {config_name} {launcher_spec} env={envs} seed=1,2,3")
# click parses the CLI options and invokes the command.
if __name__ == '__main__':
    get_train_cmd()
|
agenthive-dev
|
baselines/mjrl/get_trian_cmd.py
|
#!/usr/bin/env python
"""
MIT License
Copyright (c) 2017 Guillaume Papin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
A wrapper script around clang-format, suitable for linting multiple files
and to use for continuous integration.
This is an alternative API for the clang-format command line.
It runs over multiple files and directories in parallel.
A diff output is produced and a sensible exit code is returned.
"""
import argparse
import difflib
import fnmatch
import multiprocessing
import os
import signal
import subprocess
import sys
import traceback
from functools import partial
# subprocess.DEVNULL exists on py3; fall back to an open /dev/null handle
# on older interpreters.
try:
    from subprocess import DEVNULL  # py3k
except ImportError:
    DEVNULL = open(os.devnull, "wb")
# File extensions (without the dot) treated as C/C++/CUDA sources by default.
DEFAULT_EXTENSIONS = "c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,cu"
class ExitStatus:
    """Process exit codes: 0 = clean, 1 = formatting diffs found, 2 = error."""

    SUCCESS = 0  # all files already formatted
    DIFF = 1  # at least one file would be reformatted
    TROUBLE = 2  # clang-format missing/failing, or an internal error
def list_files(files, recursive=False, extensions=None, exclude=None):
    """Expand *files* into the flat list of paths to check.

    Non-directory entries are passed through unchanged. Directories are
    walked only when *recursive* is set; walked files are kept when their
    extension (without the dot) appears in *extensions*, and any file or
    subdirectory matching a glob pattern in *exclude* is skipped.
    """
    if extensions is None:
        extensions = []
    if exclude is None:
        exclude = []
    collected = []
    for entry in files:
        if not (recursive and os.path.isdir(entry)):
            collected.append(entry)
            continue
        for dirpath, dnames, fnames in os.walk(entry):
            candidates = [os.path.join(dirpath, fname) for fname in fnames]
            for pattern in exclude:
                # os.walk honors in-place edits of dnames, which lets us
                # prune excluded directories before they are ever listed.
                dnames[:] = [
                    d
                    for d in dnames
                    if not fnmatch.fnmatch(os.path.join(dirpath, d), pattern)
                ]
                candidates = [
                    c for c in candidates if not fnmatch.fnmatch(c, pattern)
                ]
            collected.extend(
                c for c in candidates if os.path.splitext(c)[1][1:] in extensions
            )
    return collected
def make_diff(file, original, reformatted):
    """Return a unified diff (3 context lines) between two lists of lines,
    labelling the sides as *(original)* and *(reformatted)*."""
    diff_iter = difflib.unified_diff(
        original,
        reformatted,
        fromfile=f"{file}\t(original)",
        tofile=f"{file}\t(reformatted)",
        n=3,
    )
    return list(diff_iter)
class DiffError(Exception):
    """Raised when diffing a file fails; carries clang-format stderr lines."""

    def __init__(self, message, errs=None):
        super().__init__(message)
        # Keep any captured stderr so callers can relay it to the user.
        self.errs = errs if errs else []
class UnexpectedError(Exception):
    """Raised for failures other than DiffError; records the traceback text."""

    def __init__(self, message, exc=None):
        super().__init__(message)
        # Snapshot the active traceback immediately, while it is still current.
        self.formatted_traceback = traceback.format_exc()
        self.exc = exc
def run_clang_format_diff_wrapper(args, file):
    """Run the diff for one file, normalizing unknown failures.

    DiffError propagates untouched; any other exception is re-raised as an
    UnexpectedError tagged with the offending file name (needed because the
    call may run inside a multiprocessing pool).
    """
    try:
        return run_clang_format_diff(args, file)
    except DiffError:
        raise
    except Exception as exc:
        raise UnexpectedError(f"{file}: {exc.__class__.__name__}: {exc}", exc)
def run_clang_format_diff(args, file):
    """Diff *file* against what clang-format would produce for it.

    Returns (diff_lines, stderr_lines). Raises DiffError when the file
    cannot be read, or when clang-format fails to start or exits non-zero.
    """
    try:
        with open(file, encoding="utf-8") as fh:
            original = fh.readlines()
    except OSError as exc:
        raise DiffError(str(exc))
    invocation = [args.clang_format_executable, file]
    # clang-format echoes the file's bytes back unchanged, so decoding its
    # output as utf-8 matches the assumption made when reading the file above;
    # its diagnostics are documented to be utf-8 as well
    # (http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation).
    try:
        proc = subprocess.Popen(
            invocation,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
            encoding="utf-8",
        )
    except OSError as exc:
        raise DiffError(
            f"Command '{subprocess.list2cmdline(invocation)}' failed to start: {exc}"
        )
    # hopefully the stderr pipe won't get full and block the process
    reformatted = list(proc.stdout.readlines())
    errs = list(proc.stderr.readlines())
    proc.wait()
    if proc.returncode:
        raise DiffError(
            f"Command '{subprocess.list2cmdline(invocation)}' returned "
            f"non-zero exit status {proc.returncode}",
            errs,
        )
    return make_diff(file, original, reformatted), errs
def bold_red(s):
    """Wrap *s* in ANSI bold+red escapes, resetting afterwards."""
    return f"\x1b[1m\x1b[31m{s}\x1b[0m"
def colorize(diff_lines):
    """Yield *diff_lines* with ANSI colors applied per unified-diff markers."""

    def wrap(code, text):
        return code + text + "\x1b[0m"

    for line in diff_lines:
        if line.startswith(("--- ", "+++ ")):
            yield wrap("\x1b[1m", line)  # file headers: bold
        elif line.startswith("@@ "):
            yield wrap("\x1b[36m", line)  # hunk headers: cyan
        elif line.startswith("+"):
            yield wrap("\x1b[32m", line)  # additions: green
        elif line.startswith("-"):
            yield wrap("\x1b[31m", line)  # removals: red
        else:
            yield line  # context lines stay uncolored
def print_diff(diff_lines, use_color):
    """Write diff lines to stdout, ANSI-colorized when *use_color* is true."""
    lines = colorize(diff_lines) if use_color else diff_lines
    sys.stdout.writelines(lines)
def print_trouble(prog, message, use_colors):
    """Report *message* on stderr, prefixed `prog: error: ...` (error colored
    when *use_colors* is true)."""
    label = bold_red("error:") if use_colors else "error:"
    print(f"{prog}: {label} {message}", file=sys.stderr)
def main():
    """Lint files with clang-format, printing diffs; return the exit status.

    Returns ExitStatus.SUCCESS when everything is formatted, ExitStatus.DIFF
    when at least one file differs, ExitStatus.TROUBLE on errors (and None —
    exit code 0 — when no files match).
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--clang-format-executable",
        metavar="EXECUTABLE",
        help="path to the clang-format executable",
        default="clang-format",
    )
    parser.add_argument(
        "--extensions",
        help=f"comma separated list of file extensions (default: {DEFAULT_EXTENSIONS})",
        default=DEFAULT_EXTENSIONS,
    )
    parser.add_argument(
        "-r",
        "--recursive",
        action="store_true",
        help="run recursively over directories",
    )
    parser.add_argument("files", metavar="file", nargs="+")
    parser.add_argument("-q", "--quiet", action="store_true")
    parser.add_argument(
        "-j",
        metavar="N",
        type=int,
        default=0,
        help="run N clang-format jobs in parallel (default number of cpus + 1)",
    )
    parser.add_argument(
        "--color",
        default="auto",
        choices=["auto", "always", "never"],
        help="show colored diff (default: auto)",
    )
    parser.add_argument(
        "-e",
        "--exclude",
        metavar="PATTERN",
        action="append",
        default=[],
        help="exclude paths matching the given glob-like pattern(s) from recursive search",
    )
    args = parser.parse_args()
    # use default signal handling, like diff return SIGINT value on ^C
    # https://bugs.python.org/issue14229#msg156446
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    try:
        signal.SIGPIPE
    except AttributeError:
        # compatibility, SIGPIPE does not exist on Windows
        pass
    else:
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    # "auto" colorizes only when the corresponding stream is a terminal.
    colored_stdout = False
    colored_stderr = False
    if args.color == "always":
        colored_stdout = True
        colored_stderr = True
    elif args.color == "auto":
        colored_stdout = sys.stdout.isatty()
        colored_stderr = sys.stderr.isatty()
    # Probe the executable once up front so a missing/broken clang-format
    # fails fast instead of once per file.
    version_invocation = [args.clang_format_executable, "--version"]
    try:
        subprocess.check_call(version_invocation, stdout=DEVNULL)
    except subprocess.CalledProcessError as e:
        print_trouble(parser.prog, str(e), use_colors=colored_stderr)
        return ExitStatus.TROUBLE
    except OSError as e:
        print_trouble(
            parser.prog,
            f"Command '{subprocess.list2cmdline(version_invocation)}' failed to start: {e}",
            use_colors=colored_stderr,
        )
        return ExitStatus.TROUBLE
    retcode = ExitStatus.SUCCESS
    files = list_files(
        args.files,
        recursive=args.recursive,
        exclude=args.exclude,
        extensions=args.extensions.split(","),
    )
    if not files:
        return
    njobs = args.j
    if njobs == 0:
        njobs = multiprocessing.cpu_count() + 1
    njobs = min(len(files), njobs)
    if njobs == 1:
        # execute directly instead of in a pool,
        # less overhead, simpler stacktraces
        it = (run_clang_format_diff_wrapper(args, file) for file in files)
        pool = None
    else:
        pool = multiprocessing.Pool(njobs)
        it = pool.imap_unordered(partial(run_clang_format_diff_wrapper, args), files)
    # Drain results as they arrive; DiffError is per-file (keep going),
    # UnexpectedError aborts the whole run.
    while True:
        try:
            outs, errs = next(it)
        except StopIteration:
            break
        except DiffError as e:
            print_trouble(parser.prog, str(e), use_colors=colored_stderr)
            retcode = ExitStatus.TROUBLE
            sys.stderr.writelines(e.errs)
        except UnexpectedError as e:
            print_trouble(parser.prog, str(e), use_colors=colored_stderr)
            sys.stderr.write(e.formatted_traceback)
            retcode = ExitStatus.TROUBLE
            # stop at the first unexpected error,
            # something could be very wrong,
            # don't process all files unnecessarily
            if pool:
                pool.terminate()
            break
        else:
            sys.stderr.writelines(errs)
            if outs == []:
                continue
            if not args.quiet:
                print_diff(outs, use_color=colored_stdout)
            if retcode == ExitStatus.SUCCESS:
                retcode = ExitStatus.DIFF
    return retcode
if __name__ == "__main__":
    # Propagate the lint result (0 clean, 1 diffs, 2 trouble) as the exit code.
    sys.exit(main())
|
agenthive-dev
|
.circleci/unittest/linux/scripts/run-clang-format.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup, find_packages
# Package metadata for the psvi (variational coresets) library.
setup(
    name="psvi",
    version="0.1.0",
    description="Setting up a python package for Bayesian inference using variational coresets",
    author="Dionysis Manousakas",
    author_email="[email protected]",
    license="LICENSE",
    # Ship the psvi package and all of its subpackages, nothing else.
    packages=find_packages(include=["psvi", "psvi.*"]),
    install_requires=[
        "iopath==0.1.10",
        "matplotlib>=3.5.2",
        "numpy>=1.22.4",
        "pandas>=1.4.3",
        "Pillow==9.2.0",
        "requests==2.25.1",
        "scikit_learn>=1.1.1",
        "setuptools>=59.6.0",
        "torch>=1.12.0",
        "torchvision==0.13.0",
        "tqdm==4.64.0",
        # TyXe is not on PyPI; installed straight from GitHub.
        "TyXe @ git+https://github.com/TyXe-BDL/TyXe",
        "arff==0.9",
        "pystan==3.5.0",
    ],
    keywords=[
        "bilevel optimization",
        "hypergradient",
        "sampling",
        "importance sampling",
        "variational inference",
        "Monte Carlo",
        "Bayesian",
        "neural networks",
        "pruning",
        "sparsity",
        "coresets",
        "distillation",
        "meta-learning",
        "inducing points",
        "pseudodata",
        "neural networks",
    ],
)
|
Blackbox-Coresets-VI-main
|
setup.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
Blackbox-Coresets-VI-main
|
psvi/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
r"""
Experiment execution script: Users can specify the dataset, the statistical model and the inference methods,
and this script will generate a dictionary with the predictive performance.
"""
# Import libraries
import argparse
import os
import pickle
from collections import defaultdict
from platform import architecture
from typing import Any, Dict, List
from psvi.inference.baselines import (
run_giga,
run_mfvi,
run_mfvi_subset,
run_opsvi,
run_random,
run_sparsevi,
run_mfvi_regressor,
run_mfvi_subset_regressor
)
from psvi.inference.psvi_classes import (
PSVI,
PSVIAFixedU,
PSVIAV,
PSVIFixedU,
PSVILearnV,
PSVI_Ablated,
PSVI_No_IW,
PSVI_No_Rescaling,
PSVIFreeV,
PSVI_regressor,
PSVILearnV_regressor,
PSVIAV_regressor,
)
from psvi.inference.sparsebbvi import run_sparsevi_with_bb_elbo
from psvi.models.logreg import *
from experiments_utils import read_dataset, read_regression_dataset
torch.autograd.set_detect_anomaly(True)
parser = argparse.ArgumentParser()
# Arguments for the experiment workflow
parser.add_argument(
"--fnm", default="results", type=str, help="Filename where results are stored"
)
parser.add_argument(
"--datasets",
default=["phishing"],
nargs="+",
choices=["webspam", "phishing", "adult", "MNIST", "halfmoon", "four_blobs", "sinus", "concrete", "energy", "power", "kin8nm", "protein", "naval", "yacht", "boston", "wine", "year", "synth_lr_10", "synth_lr_50", "synth_lr_200"],
type=str,
help="List of dataset names",
)
parser.add_argument(
"--methods",
default=["psvi_learn_v", "mfvi", "mfvi_subset"],
nargs="+",
type=str,
help="List of inference method names",
)
parser.add_argument("--mc_samples", default=10, type=int, help="Monte Carlo samples")
parser.add_argument("--num_epochs", default=301, type=int, help="Training epochs")
parser.add_argument(
"--num_trials",
default=3,
type=int,
help="Trials executed for each inference method",
)
parser.add_argument(
"--data_minibatch", default=128, type=int, help="Data minibatch size"
)
parser.add_argument(
"--inner_it",
default=100,
type=int,
help="Gradient steps in the inner problem of nested optimization",
)
parser.add_argument(
"--outer_it",
default=100,
type=int,
help="Gradient steps in the outer problem of nested optimization",
)
parser.add_argument(
"--trainer",
default="nested",
choices=["nested", "hyper", "joint"],
type=str,
help="Method for computation of hypergradient",
)
parser.add_argument(
"--diagonal",
action=argparse.BooleanOptionalAction,
help="Diagonal approximation of Gaussian covariance matrices used",
)
parser.add_argument(
"--architecture",
default="logistic_regression",
choices=["logistic_regression", "logistic_regression_fullcov", "fn", "fn2", "lenet", "regressor_net"],
type=str,
help="Model architecture",
)
parser.add_argument(
"--n_hidden",
default=40,
type=int,
help="Number of hidden units in feedforward neural architectures",
)
parser.add_argument(
"--n_layers",
default=1,
type=int,
help="Number of layers in feedforward neural architectures",
)
parser.add_argument(
"--log_every",
default=150,
type=int,
help="Frequency of logging evaluation results throughout training (in number of outer gradient iterations)",
)
parser.add_argument(
"--register_elbos",
action=argparse.BooleanOptionalAction,
help="Saving variational objectives values throughout inference for plotting",
)
parser.add_argument(
"--init_sd",
default=1e-6,
type=float,
help="Initialization of standard deviation for variational parameters",
)
parser.add_argument(
"--lr0net",
default=1e-3,
type=float,
help="Initial learning rate for model parameters optimizer",
)
parser.add_argument(
"--lr0u",
default=1e-4,
type=float,
help="Initial learning rate for optimizer of pseudocoreset point input coordinates u",
)
parser.add_argument(
"--lr0v",
default=1e-3,
type=float,
help="Initial learning rate for optimizer of coreset support coefficients",
)
parser.add_argument(
"--lr0z",
default=1e-3,
type=float,
help="Initial learning rate for optimizer of coreset points labels",
)
parser.add_argument(
"--lr0alpha",
default=1e-3,
type=float,
help="Initial learning rate for coreset likelihood rescaling coefficient",
)
parser.add_argument(
"--init_at",
default="subsample",
choices=["subsample", "random"],
type=str,
help="Method for coreset points initialization",
)
parser.add_argument(
"--compute_weights_entropy",
action=argparse.BooleanOptionalAction,
help="Comput entropy of weights for plotting",
)
parser.add_argument(
"--coreset_sizes",
default=[100],
nargs="+",
type=int,
help="List of sizes for coresets computed throughout the experiment, or subsamples used for baselines mfvi_subset and random",
)
parser.add_argument(
"--reset",
action=argparse.BooleanOptionalAction,
help="Reset model parameters over intervals during training",
)
parser.add_argument(
"--prune",
action=argparse.BooleanOptionalAction,
help="Prune to coreset of smaller size",
)
parser.add_argument(
"--prune_interval",
default=400,
type=int,
help="Gradient steps in the outer problem of nested optimization between prunning steps",
)
parser.add_argument(
"--prune_sizes",
default=[20],
nargs="+",
type=int,
help="List of sizes for coresets in a pruning experiment (decreasing)",
)
parser.add_argument(
"--increment",
action=argparse.BooleanOptionalAction,
help="Learn tasks incrementally",
)
parser.add_argument(
"--increment_interval",
default=1000,
type=int,
help="Gradient steps in the outer problem of nested optimization between incremental learning stages",
)
parser.add_argument(
"--increment_sizes",
default=[20],
nargs="+",
type=int,
help="List of sizes for coresets in the incremental learning setting (non-decreasing)",
)
parser.add_argument(
"--retrain_on_coreset",
action=argparse.BooleanOptionalAction,
help="Retrain the variational model restricted only on the extracted coreset datapoints for the same number of epochs",
)
parser.add_argument(
"--save_input_data",
action=argparse.BooleanOptionalAction,
help="Save input dataset",
)
parser.add_argument(
"--test_ratio", default=0.2, type=float, help="Ratio of test dataset size"
)
parser.add_argument(
"--log_pseudodata",
action=argparse.BooleanOptionalAction,
help="Store pseudodata for visualisation",
)
parser.add_argument(
"--data_folder",
default="../data",
type=str,
help="Folder where dataset gets stored",
)
parser.add_argument(
"--results_folder",
default="../results",
type=str,
help="Folder where evaluation files get stored",
)
parser.add_argument(
"--learn_z",
action=argparse.BooleanOptionalAction,
help="Learn soft labels for distilled data",
)
parser.add_argument(
"--gamma", default=1., type=float, help="Decay factor of learning rate"
)
# Defaults for all boolean --flag/--no-flag options declared above.
parser.set_defaults(
    diagonal=True,
    reset=False,
    compute_weights_entropy=False,
    register_elbos=False,
    save_input_data=False,
    prune=False,
    increment=False,
    log_pseudodata=False,
    retrain_on_coreset=False,
    learn_z=False,
)
parsed_args = parser.parse_args()
method_args = vars(parsed_args)
datasets, methods = method_args["datasets"], method_args["methods"]
# Convenience flag: several code paths only need to know whether the model
# is plain logistic regression.
method_args["logistic_regression"] = method_args['architecture'] == 'logistic_regression'
# Make folders for data and results storage. A plain loop (not a throwaway
# list comprehension) with exist_ok=True also avoids the race between the
# previous os.path.exists check and os.makedirs.
for fold in (method_args["data_folder"], method_args["results_folder"]):
    os.makedirs(fold, exist_ok=True)
def rec_dd():
    """Factory for an arbitrarily-nested defaultdict (results[a][b][c] just works)."""
    return defaultdict(rec_dd)
results = rec_dd() # recursive dictionary for storage of inference results
# Specify inference methods: maps each CLI method name to a callable with a
# uniform (*args, **kwargs) signature. psvi_* entries construct the class and
# immediately run inference via .run_psvi; the remaining entries are plain
# baseline functions imported above.
inf_dict = {
    "psvi": (lambda *args, **kwargs: PSVI(*args, **kwargs).run_psvi(*args, **kwargs)),
    "psvi_ablated": (
        lambda *args, **kwargs: PSVI_Ablated(*args, **kwargs).run_psvi(*args, **kwargs)
    ),
    "psvi_learn_v": (
        lambda *args, **kwargs: PSVILearnV(*args, **kwargs).run_psvi(*args, **kwargs)
    ),
    "psvi_alpha_v": (
        lambda *args, **kwargs: PSVIAV(*args, **kwargs).run_psvi(*args, **kwargs)
    ),
    "psvi_no_iw": (
        lambda *args, **kwargs: PSVI_No_IW(*args, **kwargs).run_psvi(*args, **kwargs)
    ),
    "psvi_free_v": (
        lambda *args, **kwargs: PSVIFreeV(*args, **kwargs).run_psvi(*args, **kwargs)
    ),
    "psvi_no_rescaling": (
        lambda *args, **kwargs: PSVI_No_Rescaling(*args, **kwargs).run_psvi(
            *args, **kwargs
        )
    ),
    "psvi_fixed_u": (
        lambda *args, **kwargs: PSVIFixedU(*args, **kwargs).run_psvi(*args, **kwargs)
    ),
    "psvi_alpha_fixed_u": (
        lambda *args, **kwargs: PSVIAFixedU(*args, **kwargs).run_psvi(
            *args, **kwargs
        )
    ),
    # Regression variants; selected via the "<name>_regressor" key lookup in
    # regressor_experiment_driver.
    "psvi_regressor": (
        lambda *args, **kwargs: PSVI_regressor(*args, **kwargs).run_psvi(*args, **kwargs)
    ),
    "psvi_alpha_v_regressor": (
        lambda *args, **kwargs: PSVIAV_regressor(*args, **kwargs).run_psvi(*args, **kwargs)
    ),
    "psvi_learn_v_regressor": (
        lambda *args, **kwargs: PSVILearnV_regressor(*args, **kwargs).run_psvi(*args, **kwargs)
    ),
    "sparsebbvi": run_sparsevi_with_bb_elbo,
    "opsvi": run_opsvi,
    "random": run_random,
    "sparsevi": run_sparsevi,
    "giga": run_giga,
    "mfvi": run_mfvi,
    "mfvi_subset": run_mfvi_subset,
    "mfvi_regressor": run_mfvi_regressor,
    "mfvi_subset_regressor": run_mfvi_subset_regressor,
}
def experiment_driver(
    datasets: List[str],
    methods: Dict[str, bool],
    method_args: Dict[str, Any],
) -> None:
    r"""
    Run the classification experiment: for every dataset x method x coreset
    size x trial, invoke the selected inference routine and collect its
    predictive-performance dict into the module-level `results`, then pickle
    everything via write_to_files.
    """
    for dnm in datasets:
        # Read the dataset
        print(f"\nReading/Generating the dataset {dnm.upper()}")
        x, y, xt, yt, N, D, train_dataset, test_dataset, num_classes = read_dataset(
            dnm, method_args
        )
        print(
            f"\nBayesian {'logistic regression' if method_args['logistic_regression'] else 'neural network'} experiment.\nInference via {' '.join(map(lambda x:x.upper(), methods))} on {dnm} data over {method_args['num_trials']} {'independent trials.' if method_args['num_trials']>1 else 'trial.'}\n\n\n"
        )
        for nm_alg in methods:
            print(f"\n\nRunning {nm_alg}\n")
            logistic_regression = method_args.get(
                "logistic_regression", method_args.get("architecture") == "logreg"
            )
            inf_alg = inf_dict[nm_alg]
            # Weight-entropy logging is skipped for methods without per-point
            # coreset weights.
            compute_weights_entropy = (
                not nm_alg.startswith(("opsvi", "mfvi_subset"))
            ) and method_args["compute_weights_entropy"]
            tps = (
                method_args["coreset_sizes"]
                if nm_alg.startswith(("psvi", "opsvi", "mfvi_subset"))
                else [-1]
            )  # alias for baselines with no explicit constraint on dataset size
            for t in range(method_args["num_trials"]):
                print(f"Trial #{t}")
                for (
                    ps
                ) in tps:  # range of pseudocoreset sizes tested over the experiment
                    print(
                        f"Coreset/Subset with {ps if not method_args['increment'] else method_args['increment_sizes'][0]} datapoints"
                    ) if ps != -1 else print("Unconstrained data access")
                    results[dnm][nm_alg][ps][t] = inf_alg(
                        mc_samples=method_args["mc_samples"],
                        num_epochs=method_args["num_epochs"],
                        data_minibatch=method_args["data_minibatch"],
                        D=D,
                        N=N,
                        tr=t,
                        diagonal=method_args["diagonal"],
                        x=x,
                        y=y,
                        xt=xt,
                        yt=yt,
                        inner_it=method_args["inner_it"],
                        outer_it=method_args["outer_it"],
                        scatterplot_coreset=method_args.get(
                            "scatterplot_coreset"
                        ),  # not parsed for some methods atm
                        logistic_regression=logistic_regression,
                        trainer=method_args["trainer"],
                        log_every=method_args["log_every"],
                        register_elbos=method_args["register_elbos"],
                        lr0u=method_args["lr0u"],
                        lr0net=method_args["lr0net"],
                        lr0v=method_args["lr0v"],
                        lr0z=method_args["lr0z"],
                        lr0alpha=method_args["lr0alpha"],
                        init_args=method_args["init_at"],
                        init_sd=method_args[
                            "init_sd"
                        ],  # initialization of variance in variational model
                        num_pseudo=ps,
                        seed=t,  # map random seed to the trial number for reproducibility of inference result at the beginning of each of the baseline
                        compute_weights_entropy=compute_weights_entropy,
                        reset=method_args.get("reset"),
                        reset_interval=method_args.get("reset_interval"),
                        architecture=method_args.get("architecture"),
                        log_pseudodata=method_args.get("log_pseudodata"),
                        n_hidden=method_args.get(
                            "n_hidden", 40
                        ),  # hidden units in nn architecture
                        n_layers=method_args.get("n_layers", 1),
                        train_dataset=train_dataset,
                        test_dataset=test_dataset,
                        dnm=dnm,
                        nc=num_classes,
                        prune=method_args.get("prune"),
                        prune_interval=method_args.get("prune_interval"),
                        prune_sizes=method_args.get("prune_sizes"),
                        increment=method_args.get("increment"),
                        increment_interval=method_args.get("increment_interval"),
                        increment_sizes=method_args.get("increment_sizes"),
                        retrain_on_coreset=method_args.get("retrain_on_coreset"),
                        learn_z=method_args["learn_z"],
                    )
                print("Trial completed!\n")
    return write_to_files(results, method_args["fnm"])
def regressor_experiment_driver(
    datasets: List[str],
    methods: Dict[str, bool],
    method_args: Dict[str, Any],
) -> None:
    r"""
    Run the BNN regression experiment: mirrors experiment_driver but uses the
    "<method>_regressor" entries of inf_dict, a train/val/test split, and
    target normalization stats (y_mean, y_std, taus).
    """
    for dnm in datasets:
        # Read the dataset
        print(f"\nReading/Generating the dataset {dnm.upper()}")
        # Fixed seed and held-out fraction for the regression splits.
        method_args["seed"], method_args["num_test"] = 42, .15
        x, y, xv, yv, xt, yt, N, D, train_dataset, val_dataset, test_dataset, y_mean, y_std, taus = read_regression_dataset(
            dnm, method_args
        )
        print(
            f"\nRegression experiment using BNNs.\nInference via {' '.join(map(lambda x:x.upper(), methods))} on {dnm} data over {method_args['num_trials']} {'independent trials.' if method_args['num_trials']>1 else 'trial.'}\n\n\n"
        )
        for nm_alg in methods:
            print(f"\n\nRunning {nm_alg}\n")
            logistic_regression = False
            inf_alg = inf_dict[nm_alg + "_regressor"]
            compute_weights_entropy = (
                not nm_alg.startswith("mfvi_subset")
            ) and method_args["compute_weights_entropy"]
            tps = (
                method_args["coreset_sizes"]
                if nm_alg.startswith(("psvi", "mfvi_subset"))
                else [-1]
            )  # alias for baselines with no explicit constraint on dataset size
            for t in range(method_args["num_trials"]):
                print(f"Trial #{t}")
                for (
                    ps
                ) in tps:  # range of pseudocoreset sizes tested over the experiment
                    print(
                        f"Coreset/Subset with {ps} datapoints"
                    ) if ps != -1 else print("Unconstrained data access")
                    results[dnm][nm_alg][ps][t] = inf_alg(
                        mc_samples=method_args["mc_samples"],
                        num_epochs=method_args["num_epochs"],
                        data_minibatch=method_args["data_minibatch"],
                        D=D,
                        N=N,
                        tr=t,
                        diagonal=method_args["diagonal"],
                        x=x,
                        y=y,
                        xv=xv,
                        yv=yv,
                        xt=xt,
                        yt=yt,
                        inner_it=method_args["inner_it"],
                        outer_it=method_args["outer_it"],
                        scatterplot_coreset=method_args.get(
                            "scatterplot_coreset"
                        ),  # not parsed for some methods atm
                        logistic_regression=logistic_regression,
                        trainer=method_args["trainer"],
                        log_every=method_args["log_every"],
                        register_elbos=method_args["register_elbos"],
                        lr0u=method_args["lr0u"],
                        lr0net=method_args["lr0net"],
                        lr0v=method_args["lr0v"],
                        init_args=method_args["init_at"],
                        init_sd=method_args[
                            "init_sd"
                        ],  # initialization of variance in variational model
                        num_pseudo=ps,
                        seed=t,  # map random seed to the trial number for reproducibility of inference result at the beginning of each of the baseline
                        compute_weights_entropy=compute_weights_entropy,
                        reset=method_args.get("reset"),
                        reset_interval=method_args.get("reset_interval"),
                        architecture=method_args.get("architecture"),
                        log_pseudodata=method_args.get("log_pseudodata"),
                        n_hidden=method_args.get(
                            "n_hidden", 40
                        ),  # hidden units in nn architecture
                        n_layers=method_args.get("n_layers", 1),
                        train_dataset=train_dataset,
                        val_dataset=val_dataset,
                        test_dataset=test_dataset,
                        dnm=dnm,
                        y_mean=y_mean,
                        y_std=y_std,
                        taus=taus,
                    )
                print("Trial completed!\n")
    return write_to_files(results, method_args["fnm"])
def write_to_files(results: Dict[str, Any], fnm: str) -> None:
    r"""
    Pickle the nested results dict as <results_folder>/<fnm>.pk.

    NOTE(review): the destination folder comes from the module-level
    method_args, not a parameter.
    """
    destination = f"{method_args['results_folder']}/{fnm}.pk"
    print(f"Storing results in {destination}")
    with open(destination, "wb") as outfile:
        pickle.dump(results, outfile)
## Entry point
# Dispatch on architecture: "regressor_net" runs the BNN regression driver,
# everything else runs the classification driver.
if __name__ == "__main__":
    (experiment_driver(
        datasets,
        methods,
        method_args,
    ) if method_args.get("architecture") != "regressor_net"
    else regressor_experiment_driver( datasets,
        methods,
        method_args))  # run experiment
|
Blackbox-Coresets-VI-main
|
psvi/experiments/flow_psvi.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
Blackbox-Coresets-VI-main
|
psvi/experiments/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
ADAPTATION OF flow_psvi FOR MULTI-GPU PLATFORMS
"""
r"""
Experiment execution script: Users can specify the dataset, the statistical model and the inference methods,
and this script will generate a dictionary with the predictive performance.
"""
# Import libraries
import argparse
import os
import pickle
from collections import defaultdict
from platform import architecture
from typing import Any, Dict, List
import concurrent
import tqdm
import random
from psvi.inference.baselines import (
run_giga,
run_mfvi,
run_mfvi_subset,
run_opsvi,
run_random,
run_sparsevi,
run_mfvi_regressor,
run_mfvi_subset_regressor
)
from psvi.inference.psvi_classes import (
PSVI,
PSVIAFixedU,
PSVIAV,
PSVIFixedU,
PSVILearnV,
PSVI_Ablated,
PSVI_No_IW,
PSVI_No_Rescaling,
PSVIFreeV,
PSVI_regressor,
PSVILearnV_regressor,
PSVIAV_regressor,
)
from psvi.inference.sparsebbvi import run_sparsevi_with_bb_elbo
from psvi.models.logreg import *
from experiments_utils import read_dataset, read_regression_dataset
import multiprocessing
from multiprocessing import set_start_method
torch.autograd.set_detect_anomaly(True)
NUM_GPUS = 8  # worker processes in the pool; jobs are round-robined over device ids [0, NUM_GPUS)
parser = argparse.ArgumentParser()
# Arguments for the experiment workflow
parser.add_argument(
    "--fnm", default="results", type=str, help="Filename where results are stored"
)
parser.add_argument(
    "--datasets",
    default=["phishing"],
    nargs="+",
    choices=["webspam", "phishing", "adult", "MNIST", "halfmoon", "four_blobs", "sinus", "concrete", "energy", "power", "kin8nm", "protein", "naval", "yacht", "boston", "wine", "year", "synth_lr_10", "synth_lr_50", "synth_lr_200"],
    type=str,
    help="List of dataset names",
)
parser.add_argument(
    "--methods",
    default=["psvi_learn_v", "mfvi", "mfvi_subset"],
    nargs="+",
    type=str,
    help="List of inference method names",
)
parser.add_argument("--mc_samples", default=10, type=int, help="Monte Carlo samples")
parser.add_argument("--num_epochs", default=301, type=int, help="Training epochs")
parser.add_argument(
    "--num_trials",
    default=3,
    type=int,
    help="Trials executed for each inference method",
)
parser.add_argument(
    "--data_minibatch", default=128, type=int, help="Data minibatch size"
)
# Nested (bilevel) optimization schedule
parser.add_argument(
    "--inner_it",
    default=100,
    type=int,
    help="Gradient steps in the inner problem of nested optimization",
)
parser.add_argument(
    "--outer_it",
    default=100,
    type=int,
    help="Gradient steps in the outer problem of nested optimization",
)
parser.add_argument(
    "--trainer",
    default="nested",
    choices=["nested", "hyper", "joint"],
    type=str,
    help="Method for computation of hypergradient",
)
parser.add_argument(
    "--diagonal",
    action=argparse.BooleanOptionalAction,
    help="Diagonal approximation of Gaussian covariance matrices used",
)
parser.add_argument(
    "--architecture",
    default="logistic_regression",
    choices=["logistic_regression", "logistic_regression_fullcov", "fn", "fn2", "lenet", "regressor_net"],
    type=str,
    help="Model architecture",
)
parser.add_argument(
    "--n_hidden",
    default=40,
    type=int,
    help="Number of hidden units in feedforward neural architectures",
)
parser.add_argument(
    "--n_layers",
    default=1,
    type=int,
    help="Number of layers in feedforward neural architectures",
)
parser.add_argument(
    "--log_every",
    default=150,
    type=int,
    help="Frequency of logging evaluation results throughout training (in number of outer gradient iterations)",
)
parser.add_argument(
    "--register_elbos",
    action=argparse.BooleanOptionalAction,
    help="Saving variational objectives values throughout inference for plotting",
)
# Learning rates for the different parameter groups of the variational model
parser.add_argument(
    "--init_sd",
    default=1e-6,
    type=float,
    help="Initialization of standard deviation for variational parameters",
)
parser.add_argument(
    "--lr0net",
    default=1e-3,
    type=float,
    help="Initial learning rate for model parameters optimizer",
)
parser.add_argument(
    "--lr0u",
    default=1e-4,
    type=float,
    help="Initial learning rate for optimizer of pseudocoreset point input coordinates u",
)
parser.add_argument(
    "--lr0v",
    default=1e-3,
    type=float,
    help="Initial learning rate for optimizer of coreset support coefficients",
)
parser.add_argument(
    "--lr0z",
    default=1e-3,
    type=float,
    help="Initial learning rate for optimizer of coreset points labels",
)
parser.add_argument(
    "--lr0alpha",
    default=1e-3,
    type=float,
    help="Initial learning rate for coreset likelihood rescaling coefficient",
)
parser.add_argument(
    "--init_at",
    default="subsample",
    choices=["subsample", "random"],
    type=str,
    help="Method for coreset points initialization",
)
parser.add_argument(
    "--compute_weights_entropy",
    action=argparse.BooleanOptionalAction,
    help="Comput entropy of weights for plotting",
)
parser.add_argument(
    "--coreset_sizes",
    default=[100],
    nargs="+",
    type=int,
    help="List of sizes for coresets computed throughout the experiment, or subsamples used for baselines mfvi_subset and random",
)
# Optional training-schedule features: resets, pruning, incremental learning
parser.add_argument(
    "--reset",
    action=argparse.BooleanOptionalAction,
    help="Reset model parameters over intervals during training",
)
parser.add_argument(
    "--prune",
    action=argparse.BooleanOptionalAction,
    help="Prune to coreset of smaller size",
)
parser.add_argument(
    "--prune_interval",
    default=400,
    type=int,
    help="Gradient steps in the outer problem of nested optimization between prunning steps",
)
parser.add_argument(
    "--prune_sizes",
    default=[20],
    nargs="+",
    type=int,
    help="List of sizes for coresets in a pruning experiment (decreasing)",
)
parser.add_argument(
    "--increment",
    action=argparse.BooleanOptionalAction,
    help="Learn tasks incrementally",
)
parser.add_argument(
    "--increment_interval",
    default=1000,
    type=int,
    help="Gradient steps in the outer problem of nested optimization between incremental learning stages",
)
parser.add_argument(
    "--increment_sizes",
    default=[20],
    nargs="+",
    type=int,
    help="List of sizes for coresets in the incremental learning setting (non-decreasing)",
)
parser.add_argument(
    "--retrain_on_coreset",
    action=argparse.BooleanOptionalAction,
    help="Retrain the variational model restricted only on the extracted coreset datapoints for the same number of epochs",
)
parser.add_argument(
    "--save_input_data",
    action=argparse.BooleanOptionalAction,
    help="Save input dataset",
)
parser.add_argument(
    "--test_ratio", default=0.2, type=float, help="Ratio of test dataset size"
)
parser.add_argument(
    "--log_pseudodata",
    action=argparse.BooleanOptionalAction,
    help="Store pseudodata for visualisation",
)
parser.add_argument(
    "--data_folder",
    default="../data",
    type=str,
    help="Folder where dataset gets stored",
)
parser.add_argument(
    "--results_folder",
    default="../results",
    type=str,
    help="Folder where evaluation files get stored",
)
parser.add_argument(
    "--learn_z",
    action=argparse.BooleanOptionalAction,
    help="Learn soft labels for distilled data",
)
parser.add_argument(
    "--gamma", default=1., type=float, help="Decay factor of learning rate"
)
# Defaults for all BooleanOptionalAction flags (which otherwise default to None)
parser.set_defaults(
    diagonal=True,
    reset=False,
    compute_weights_entropy=False,
    register_elbos=False,
    save_input_data=False,
    prune=False,
    increment=False,
    log_pseudodata=False,
    retrain_on_coreset=False,
    learn_z=False,
)
parsed_args = parser.parse_args()
method_args = vars(parsed_args)  # plain dict view of the parsed namespace, used everywhere below
datasets, methods = method_args["datasets"], method_args["methods"]
# Convenience flag: True only for the plain (diagonal-covariance) logistic regression model
method_args["logistic_regression"] = method_args['architecture'] == 'logistic_regression'
# Make folders for data and results storage.  A plain for-loop replaces the
# side-effect-only list comprehension, and exist_ok avoids the
# check-then-create race of the previous `if not os.path.exists(...)` guard.
for fold in (method_args["data_folder"], method_args["results_folder"]):
    os.makedirs(fold, exist_ok=True)
def pass_dict(d, f):
    """Invoke callable *f* with the entries of mapping *d* as keyword arguments."""
    return f(**d)
def rec_dd():
    """Build an arbitrarily nested defaultdict: every missing key yields another one."""
    return defaultdict(rec_dd)
results = rec_dd() # recursive dict of inference results, keyed results[dataset][method][coreset_size][trial]
# Specify inference methods
def inf_alg(**kwargs):
    r"""
    Dispatch on ``kwargs["nm_alg"]`` to the requested inference algorithm.

    PSVI variants are instantiated and executed immediately, returning their
    results.  NOTE(review): the baseline branches (``sparsebbvi`` ... ``mfvi_subset_regressor``)
    return the runner *function* itself uncalled — confirm callers invoke it,
    since ``experiment_driver`` stores the return value directly.

    :param kwargs: full job configuration; must contain key ``"nm_alg"``
    :raises ValueError: if the algorithm name is not recognized (previously an
        unknown name silently returned ``None``)
    """
    nm = kwargs["nm_alg"]
    if nm == "psvi":
        return PSVI(**kwargs).run_psvi(**kwargs)
    elif nm == "psvi_ablated":
        return PSVI_Ablated(**kwargs).run_psvi(**kwargs)
    elif nm == "psvi_learn_v":
        return PSVILearnV(**kwargs).run_psvi(**kwargs)
    elif nm == "psvi_alpha_v":
        return PSVIAV(**kwargs).run_psvi(**kwargs)
    elif nm == "psvi_no_iw":
        return PSVI_No_IW(**kwargs).run_psvi(**kwargs)
    elif nm == "psvi_free_v":
        return PSVIFreeV(**kwargs).run_psvi(**kwargs)
    elif nm == "psvi_no_rescaling":
        return PSVI_No_Rescaling(**kwargs).run_psvi(**kwargs)
    elif nm == "psvi_fixed_u":
        return PSVIFixedU(**kwargs).run_psvi(**kwargs)
    elif nm == "psvi_alpha_fixed_u":
        return PSVIAFixedU(**kwargs).run_psvi(**kwargs)
    elif nm == "psvi_regressor":
        return PSVI_regressor(**kwargs).run_psvi(**kwargs)
    elif nm == "psvi_alpha_v_regressor":
        return PSVIAV_regressor(**kwargs).run_psvi(**kwargs)
    elif nm == "psvi_learn_v_regressor":
        return PSVILearnV_regressor(**kwargs).run_psvi(**kwargs)
    elif nm == "sparsebbvi":
        return run_sparsevi_with_bb_elbo
    elif nm == "opsvi":
        return run_opsvi
    elif nm == "random":
        return run_random
    elif nm == "sparsevi":
        return run_sparsevi
    elif nm == "giga":
        return run_giga
    elif nm == "mfvi":
        return run_mfvi
    elif nm == "mfvi_subset":
        return run_mfvi_subset
    elif nm == "mfvi_regressor":
        return run_mfvi_regressor
    elif nm == "mfvi_subset_regressor":
        return run_mfvi_subset_regressor
    else:
        raise ValueError(f"Unknown inference method: {nm}")
def experiment_driver(
    datasets: List[str],
    methods: Dict[str, bool],
    method_args: Dict[str, Any],
) -> None:
    r"""
    Run experiment.

    Builds one job per (dataset, method, trial, coreset size) combination, fans
    the jobs out over a pool of ``NUM_GPUS`` worker processes, collects the
    results into the module-level ``results`` dict and pickles them at the end.

    :param datasets: dataset names to evaluate on
    :param methods: inference method names to run
    :param method_args: parsed command-line configuration
    """
    job_args = list()
    for dnm in datasets:
        # Read the dataset
        print(f"\nReading/Generating the dataset {dnm.upper()}")
        x, y, xt, yt, N, D, train_dataset, test_dataset, num_classes = read_dataset(
            dnm, method_args
        )
        print(
            f"\nBayesian {'logistic regression' if method_args['logistic_regression'] else 'neural network'} experiment.\nInference via {' '.join(map(lambda x:x.upper(), methods))} on {dnm} data over {method_args['num_trials']} {'independent trials.' if method_args['num_trials']>1 else 'trial.'}\n\n\n"
        )
        for nm_alg in methods:
            print(f"\n\nRunning {nm_alg}\n")
            # NOTE(review): the fallback compares architecture against "logreg",
            # which is not among the parser choices ("logistic_regression") —
            # harmless while method_args["logistic_regression"] is always set
            # at module scope, but confirm before relying on the fallback.
            logistic_regression = method_args.get(
                "logistic_regression", method_args.get("architecture") == "logreg"
            )
            # Weight-entropy logging is skipped for baselines without per-point weights
            compute_weights_entropy = (
                not nm_alg.startswith(("opsvi", "mfvi_subset"))
            ) and method_args["compute_weights_entropy"]
            tps = (
                method_args["coreset_sizes"]
                if nm_alg.startswith(("psvi", "opsvi", "mfvi_subset"))
                else [-1]
            ) # alias for baselines with no explicit constraint on dataset size
            for t in range(method_args["num_trials"]):
                print(f"Trial #{t}")
                for (
                    ps
                ) in tps: # range of pseudocoreset sizes tested over the experiment
                    print(
                        f"Coreset/Subset with {ps if not method_args['increment'] else method_args['increment_sizes'][0]} datapoints"
                    ) if ps != -1 else print("Unconstrained data access")
                    idx = len(job_args)  # running job index; used to round-robin over GPUs
                    job_args.append({"mc_samples":method_args["mc_samples"],
                        "num_epochs":method_args["num_epochs"],
                        "data_minibatch":method_args["data_minibatch"],
                        "D":D,
                        "N":N,
                        "tr":t,
                        "diagonal":method_args["diagonal"],
                        "x":x,
                        "y":y,
                        "xt":xt,
                        "yt":yt,
                        "inner_it":method_args["inner_it"],
                        "outer_it":method_args["outer_it"],
                        "scatterplot_coreset":method_args.get(
                            "scatterplot_coreset"
                        ),  # not parsed for some methods atm
                        "logistic_regression":logistic_regression,
                        "trainer":method_args["trainer"],
                        "log_every":method_args["log_every"],
                        "register_elbos":method_args["register_elbos"],
                        "lr0u":method_args["lr0u"],
                        "lr0net":method_args["lr0net"],
                        "lr0v":method_args["lr0v"],
                        "lr0z":method_args["lr0z"],
                        "lr0alpha":method_args["lr0alpha"],
                        "init_args":method_args["init_at"],
                        "init_sd":method_args[
                            "init_sd"
                        ],  # initialization of variance in variational model
                        "num_pseudo":ps,
                        "seed":t,  # map random seed to the trial number for reproducibility of inference result at the beginning of each of the baseline
                        "compute_weights_entropy":compute_weights_entropy,
                        "reset":method_args.get("reset"),
                        "reset_interval":method_args.get("reset_interval"),
                        "architecture":method_args.get("architecture"),
                        "log_pseudodata":method_args.get("log_pseudodata"),
                        "n_hidden":method_args.get(
                            "n_hidden", 40
                        ),  # hidden units in nn architecture
                        "n_layers":method_args.get("n_layers", 1),
                        "train_dataset":train_dataset,
                        "test_dataset":test_dataset,
                        "dnm":dnm,
                        "nc":num_classes,
                        "prune":method_args.get("prune"),
                        "prune_interval":method_args.get("prune_interval"),
                        "prune_sizes":method_args.get("prune_sizes"),
                        "increment":method_args.get("increment"),
                        "increment_interval":method_args.get("increment_interval"),
                        "increment_sizes":method_args.get("increment_sizes"),
                        "retrain_on_coreset":method_args.get("retrain_on_coreset"),
                        "learn_z":method_args["learn_z"],
                        "nm_alg":nm_alg,
                        "device_id":idx % NUM_GPUS,  # round-robin GPU assignment
                    })
    pool = multiprocessing.Pool(NUM_GPUS) # first arg is the number of workers
    results_pool = [pool.apply_async(inf_alg, kwds=job_arg) for job_arg in job_args]
    ii=0
    # Results are matched to job_args positionally; apply_async preserves
    # submission order in `results_pool`, so the pairing is consistent.
    for result in results_pool:
        _job_arg = job_args[ii]
        ii+=1
        results[_job_arg["dnm"]][_job_arg["nm_alg"]][_job_arg["num_pseudo"]][_job_arg["tr"]] = result.get()
    return write_to_files(results, method_args["fnm"])
def write_to_files(results: Dict[str, Any], fnm: str) -> None:
    r"""
    Serialize *results* to ``<results_folder>/<fnm>.pk`` via pickle.

    The destination folder is taken from the module-level ``method_args``.
    """
    destination = f"{method_args['results_folder']}/{fnm}.pk"
    print(f"Storing results in {destination}")
    with open(destination, "wb") as handle:
        pickle.dump(results, handle)
## Entry point
if __name__ == "__main__":
    # 'spawn' start method is required so CUDA state is not shared across forks.
    set_start_method('spawn')
    # NOTE(review): `regressor_experiment_driver` is not defined or imported in
    # the visible portion of this file — confirm it exists before running with
    # architecture "regressor_net".
    (experiment_driver(
        datasets,
        methods,
        method_args,
    ) if method_args.get("architecture") != "regressor_net"
    else regressor_experiment_driver(
        datasets,
        methods,
        method_args))# run experiment
|
Blackbox-Coresets-VI-main
|
psvi/experiments/flow-psvi-parallel.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import os
import requests
import urllib.request
import zipfile
from collections import namedtuple
from io import BytesIO
import arff
import json
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
import torchvision
from PIL import Image
from psvi.models.neural_net import (
make_fc2net,
make_fcnet,
make_lenet,
make_regressor_net,
VILinear,
VILinearMultivariateNormal,
)
from sklearn.datasets import make_moons
from sklearn.decomposition import PCA
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from torch.utils.data import Dataset
r"""
Statistics used for normalization of some benchmark vision datasets
(per-channel mean and std tuples, as expected by torchvision transforms).
"""
dataset_normalization = dict(
    MNIST=((0.1307,), (0.3081,)),
    Cifar10=((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
)
r"""
Classes of some benchmark vision datasets
"""
dataset_labels = dict(
    MNIST=list(range(10)),
    Cifar10=(
        "plane",
        "car",
        "bird",
        "cat",
        "deer",
        "dog",
        "frog",  # fixed: CIFAR-10's sixth class is "frog"; was incorrectly "monkey"
        "horse",
        "ship",
        "truck",
    ),
)
# (num_channels, real_size, num_classes) record describing a vision dataset
DatasetStats = namedtuple(
    "DatasetStats", " ".join(["num_channels", "real_size", "num_classes"])
)
r"""
Dimensions of vision benchmark datasets
"""
dataset_stats = dict(
    MNIST=DatasetStats(1, 28, 10),
    Cifar10=DatasetStats(3, 32, 10),
)
class SynthDataset(Dataset):
    r"""
    Custom torch dataset class supporting transforms.

    Wraps a tensor of inputs ``x`` and (optionally) a tensor of targets ``y``.
    """
    def __init__(self, x, y=None, transforms=None):
        # `transforms` is stored for API compatibility but is not applied in
        # __getitem__ (matches existing behavior).
        self.data = x
        self.targets = y
        self.transforms = transforms
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        return self.data[index], self.targets[index]
    def subset_where(self, cs=(0, 1)):
        r"""
        Returns subset of data corresponding to given list of classes.

        Fixed: default was a mutable list ``[0, 1]`` — now an immutable tuple
        (same behavior; ``torch.tensor`` accepts either).
        """
        idcs = torch.isin(self.targets, torch.tensor(cs))
        return SynthDataset(self.data[idcs], self.targets[idcs])
    def concatenate(self, u, z):
        r"""Return a new dataset with points ``u`` (labels ``z``) appended."""
        return SynthDataset(torch.cat((self.data, u)), y=torch.cat((self.targets, z)))
def split_data(N, p_split=(0.6, 0.2, 0.2), n_split=None, shuffle=True, seed=None):
    r"""
    Helper function for splitting data into train / validation / test.

    :param N: total number of datapoints
    :param p_split: (train, val, test) fractions; at most one entry may be -1,
        meaning "the remainder"
    :param n_split: (train, val, test) absolute counts; takes precedence over
        ``p_split`` when given; at most one entry may be -1
    :param shuffle: shuffle indices before splitting
    :param seed: numpy random seed (set only when not None)
    :return: dict with "train" / "val" / "test" index arrays
    """
    if seed is not None:
        np.random.seed(seed)
    if n_split is None:
        p_split = np.array(p_split)
        assert np.sum(p_split == -1) <= 1
        # Replace the -1 placeholder with the remaining mass (the +1 cancels
        # the -1 that is still included in the sum).
        p_split[p_split == -1] = 1 - (np.sum(p_split) + 1)
        # Fixed: exact float equality (== 1.0) is fragile for fraction sums;
        # use a tolerant comparison instead.
        assert np.isclose(np.sum(p_split), 1.0)
        p_train, p_val, p_test = p_split
        train_idx = int(np.ceil(p_train * N))
        val_idx = int(np.ceil(train_idx + p_val * N))
    else:
        n_split = np.array(n_split)
        assert np.sum(n_split == -1) <= 1
        n_split[n_split == -1] = N - (np.sum(n_split) + 1)
        assert np.sum(n_split) == N
        n_train, n_val, n_test = n_split
        train_idx = int(n_train)
        val_idx = int(train_idx + n_val)
    idx = np.arange(N)
    if shuffle:
        np.random.shuffle(idx)
    return {
        "train": idx[:train_idx],
        "val": idx[train_idx:val_idx],
        "test": idx[val_idx:],
    }
# custom dataset
class BaseDataset(Dataset):
    """Minimal tensor-backed torch dataset.

    When ``randomize`` is set, the stored data is a random replica drawn
    around the mean of the empirical distribution instead of ``x`` itself.
    """
    def __init__(self, x, y=None, randomize=False):
        if randomize:
            self.data = x.mean() + 1.0 * torch.randn_like(x)
        else:
            self.data = x
        self.targets = y
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        return self.data[index], self.targets[index]
def read_regression_dataset(dnm, method_args):
    r"""
    Load a UCI-style regression benchmark, split it and wrap it in datasets.

    :param dnm: dataset name (key understood by ``get_regression_benchmark``)
    :param method_args: config dict; reads keys "seed" and "num_test"
    :return: raw splits (x, y, xv, yv, xt, yt), sizes N, D, the three wrapped
        datasets, the training-target mean/std tensors and the tau grid.

    NOTE(review): only the training targets are normalized; val/test targets
    stay in the original scale — presumably predictions are denormalized with
    the returned y_mean / y_std at evaluation time. Confirm against callers.
    """
    (X, Y), indices = get_regression_benchmark(
        dnm,
        seed=method_args["seed"],
        p_split=(-1, 0.1, method_args["num_test"]),
    )
    taus = hyperparams_for_regression()[dnm]  # grid of candidate precisions for this dataset
    # split into training and test sets
    x, y, xv, yv, xt, yt = (
        X[indices["train"]],
        Y[indices["train"]],
        X[indices["val"]],
        Y[indices["val"]],
        X[indices["test"]],
        Y[indices["test"]],
    )
    N, D = x.shape
    # compute training set statistics for normalization
    x_mean, y_mean, x_std, y_std = (
        np.mean(x, 0),
        np.mean(y),
        np.std(x, 0),
        np.std(y),
    )
    # Parse in torch dataloaders; inputs of all splits are standardized with
    # the *training* statistics.
    train_dataset, val_dataset, test_dataset, y_mean, y_std = (
        BaseDataset(
            torch.from_numpy(
                ((x - np.full(x.shape, x_mean)) / np.full(x.shape, x_std)).astype(
                    np.float32
                )
            ),
            torch.from_numpy(((y - y_mean) / y_std).astype(np.float32)),
        ),
        BaseDataset(
            torch.from_numpy(
                (
                    (xv - np.full(xv.shape, x_mean)) / np.full(xv.shape, x_std)
                ).astype(np.float32)
            ),
            torch.from_numpy(yv.astype(np.float32)),
        ),
        BaseDataset(
            torch.from_numpy(
                (
                    (xt - np.full(xt.shape, x_mean)) / np.full(xt.shape, x_std)
                ).astype(np.float32)
            ),
            torch.from_numpy(yt.astype(np.float32)),
        ),
        torch.tensor(y_mean),
        torch.tensor(y_std),
    )
    return x, y, xv, yv, xt, yt, N, D, train_dataset, val_dataset, test_dataset, y_mean, y_std, taus
def get_regression_benchmark(name, seed=111, data_dir="psvi/data/", **kwargs):
    r"""
    Return data from UCI sets
    - param name: (str) Name of dataset to be used
    - param seed: (int) Random seed for splitting data into train and test
    - param kwargs: (dict) Additional arguments for splits
    - return: Inputs, outputs, and data-splits
    """
    np.random.seed(seed)
    if not os.path.exists(data_dir):
        os.mkdir(data_dir)
    urllinks = {"concrete": "http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls",
                "energy": "https://archive.ics.uci.edu/ml/machine-learning-databases/00242/ENB2012_data.xlsx",
                "power": "https://archive.ics.uci.edu/ml/machine-learning-databases/00294/CCPP.zip",
                "kin8nm": "https://www.openml.org/data/download/3626/dataset_2175_kin8nm.arff",
                "protein": "https://archive.ics.uci.edu/ml/machine-learning-databases/00265/CASP.csv",
                "naval": "http://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI%20CBM%20Dataset.zip",
                "yacht": "http://archive.ics.uci.edu/ml/machine-learning-databases/00243/yacht_hydrodynamics.data",
                "boston": "https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data",
                "wine": "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv",
                "year": "https://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt.zip"}
    # Fixed: only URL-backed datasets get downloaded. Previously
    # `urllinks[name]` was evaluated unconditionally, so the synthetic "sinus"
    # dataset raised a KeyError before its generation branch was reached.
    if name in urllinks:
        filename = urllinks[name].split('/')[-1]
        if not os.path.exists(data_dir + filename):
            urllib.request.urlretrieve(
                urllinks[name], data_dir + filename)
    if name in ["concrete", "energy"]:
        data = np.array(pd.read_excel(data_dir + filename))
    elif name == "power":
        zipfile.ZipFile(data_dir + filename).extractall(data_dir)
        data = pd.read_excel(data_dir + 'CCPP/Folds5x2_pp.xlsx', header=0).values
    elif name == "kin8nm":
        dataset = arff.load(open(data_dir + filename))
        data = np.array(dataset['data'])
    elif name == "protein":
        data = np.array(pd.read_csv(data_dir + filename))
    elif name == "naval":
        zipfile.ZipFile(data_dir + filename).extractall(data_dir)
        data = np.loadtxt(data_dir + "UCI CBM Dataset/data.txt")
    elif name in ["yacht", "boston"]:
        data = np.loadtxt(data_dir + filename)
    elif name == "wine":
        data = np.array(pd.read_csv(data_dir + filename, delimiter=";"))
    elif name == "year":
        zipfile.ZipFile(data_dir + "/YearPredictionMSD.txt.zip").extractall(data_dir)
        data = np.loadtxt(data_dir + "/YearPredictionMSD.txt" , delimiter=",")
    elif name == "sinus":
        # Synthetic 1-D regression dataset: y = sin(x) on [0, 2*pi)
        X = np.random.rand(10**3) * 2 * np.pi
        Y = np.sin(X)
        data = np.stack((X, Y), axis=-1)
    else:
        # Fixed: the message previously formatted `data_dir` into the single
        # placeholder instead of the offending dataset name.
        raise ValueError("Unsupported dataset: {}".format(name))
    if name in ["energy", "naval"]:  # dataset has 2 response values
        X = data[:, :-2]
        Y = data[:, -2:-1]  # pick first response value
    else:
        X = data[:, :-1]
        Y = data[:, -1:]
    return (X, Y), split_data(len(X), **kwargs)
def hyperparams_for_regression():
    r"""
    Grid of candidate precision (tau) values searched over, per regression dataset.
    """
    return dict(
        concrete=[0.025, 0.05, 0.075],
        energy=[0.25, 0.5, 0.75],
        power=[0.05, 0.1, 0.15],
        kin8nm=[150, 200, 250],
        protein=[0.025, 0.05, 0.075],
        naval=[30000, 40000, 50000],
        yacht=[0.25, 0.5, 0.75],
        boston=[0.1, 0.15, 0.2],
        wine=[2.5, 3.0, 3.5],
        year=[0.1, 1.0, 10.0],
    )
def make_four_class_dataset(N_K=250):
    r"""
    Return two-dimensional four_blobs dataset with datapoints equally distributed among 4 classes.

    :param N_K (int): number of datapoints per class
    :return: shuffled (X, Y) with X of shape (4 * N_K, 2) and integer labels 0..3
    """
    # Three axis-aligned Gaussian blobs; random draws happen in the same order
    # as before so seeded runs remain reproducible.
    blob_a = torch.cat(
        [0.8 + 0.4 * torch.randn(N_K, 1), 1.5 + 0.4 * torch.randn(N_K, 1)],
        dim=-1,
    )
    blob_b = torch.cat(
        [0.5 + 0.6 * torch.randn(N_K, 1), -0.2 - 0.1 * torch.randn(N_K, 1)],
        dim=-1,
    )
    blob_c = torch.cat(
        [2.5 - 0.1 * torch.randn(N_K, 1), 1.0 + 0.6 * torch.randn(N_K, 1)],
        dim=-1,
    )
    # Fourth class: correlated Gaussian blob.
    blob_d = torch.distributions.MultivariateNormal(
        torch.Tensor([-0.5, 1.5]),
        covariance_matrix=torch.Tensor([[0.2, 0.1], [0.1, 0.1]]),
    ).sample(torch.Size([N_K]))
    X = torch.cat([blob_a, blob_b, blob_c, blob_d], dim=0)
    # Re-center the point cloud.
    X[:, 1] -= 1
    X[:, 0] -= 0.5
    Y = torch.cat([k * torch.ones(N_K).long() for k in range(4)])
    # Shuffle rows for our train/test split.
    perm = torch.randperm(X.size(0))
    return X[perm, :], Y[perm]
def set_up_model(
    D=None,
    n_hidden=None,
    nc=None,
    mc_samples=None,
    architecture=None,
    **kwargs,
):
    r"""
    Return torch nn model with the desired architecture
    :param D (int): dimensionality of input data
    :param n_hidden (int): number of units in each hidden layer
    :param nc (int): dimensionality of last layer
    :param mc_samples (int): number of samples produced at each forward pass through the nn
    :param architecture (str): nn architecture
        - "fn": fully connected feedforward network with diagonal Gaussian on variational layers
        - "residual_fn": fn with residual connections
        - "fn2": fn with full covariance matrix on variational layers
        - "lenet": LeNet architecture
        - "regressor_net": feedforward VI BNN for regression with diagonal covariance
        - "logistic_regression": single layer nn (no hidden layers) implementing the logistic regression model
        - "logistic_regression_fullcov": single layer nn (no hidden layers) implementing the logistic regression model with full covariance variational approximations
    :raises ValueError: if ``architecture`` is not one of the supported names
    """
    if architecture in {"fn", "residual_fn"}:
        return make_fcnet(
            D,
            n_hidden,
            nc,
            linear_class=VILinear,
            nonl_class=nn.ReLU,
            mc_samples=mc_samples,
            residual=(architecture == "residual_fn"),
            **kwargs,
        )
    elif architecture in {"fn2"}:
        return make_fc2net(
            D,
            n_hidden,
            nc,  # does not support argument on the number of chanells
            linear_class=VILinearMultivariateNormal,
            nonl_class=nn.ReLU,
            mc_samples=mc_samples,
            **kwargs,
        )
    elif architecture == "lenet":
        return make_lenet(
            linear_class=VILinear, nonl_class=nn.ReLU, mc_samples=mc_samples
        )
    elif architecture in {
        "regressor_net",
    }:  # feed forward VI BNN for regression with diagonal covariance (optional arg for residual connections)
        return make_regressor_net(
            D,
            n_hidden,
            nc,
            linear_class=VILinear,
            nonl_class=nn.ReLU,
            mc_samples=mc_samples,
            residual=(architecture == "residual_fn"),
            **kwargs,
        )
    elif architecture == "logistic_regression":
        return nn.Sequential(VILinear(D, nc, mc_samples=mc_samples))
    elif architecture == "logistic_regression_fullcov":
        return nn.Sequential(VILinearMultivariateNormal(D, nc, mc_samples=mc_samples))
    else:
        # Fixed: the message previously omitted the supported choice 'regressor_net'.
        raise ValueError(
            "Architecture should be one of \n'lenet', 'logistic_regression', 'logistic_regression_fullcov', 'fn', 'fn2', 'residual_fn', 'regressor_net'"
        )
@contextlib.contextmanager
def suppress_stdout():
    """Context manager that temporarily silences standard output."""
    devnull = open(os.devnull, "w")
    try:
        with contextlib.redirect_stdout(devnull):
            yield
    finally:
        devnull.close()
def get_torchvision_info(name):
    r"""
    Returns statistical information for specified torchvision benchmark dataset.

    :param name: dataset key present in the module-level tables
    :return: (num_channels, input_size, num_classes, normalization, labels)
    """
    assert name in dataset_stats, "Unsupported dataset: {}".format(name)
    stats = dataset_stats[name]
    return (
        stats.num_channels,
        stats.real_size,
        stats.num_classes,
        dataset_normalization[name],
        dataset_labels[name],
    )
def load_dataset(path, urls):
    r"""
    Writes on a file a dataset living on a given URL.

    :param path: destination directory (created if missing)
    :param urls: iterable of URLs; each is saved under its basename in *path*
    """
    if not os.path.exists(path):
        os.mkdir(path)
    for url in urls:
        # NOTE(review): no HTTP status check — an error page body would be
        # written to disk verbatim; confirm upstream URLs are stable.
        data = requests.get(url).content
        filename = os.path.join(path, os.path.basename(url))
        with open(filename, "wb") as file:
            file.write(data)
    return
def read_adult(data_folder):
    r"""
    Returns the adult dataset for logistic regression.

    Downloads the UCI Adult census files into *data_folder*, standardizes the
    numeric columns, one-hot-encodes the categorical ones, projects to 10
    principal components and appends a constant offset feature.

    :param data_folder: directory where the raw files are downloaded
    :return: (X, Y, Xt, Yt) train/test inputs and 0/1 income labels
    """
    urls = [
        "http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
        "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.names",
        "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
    ]
    load_dataset(data_folder, urls)
    columns = [
        "age",
        "workClass",
        "fnlwgt",
        "education",
        "education-num",
        "marital-status",
        "occupation",
        "relationship",
        "race",
        "sex",
        "capital-gain",
        "capital-loss",
        "hours-per-week",
        "native-country",
        "income",
    ]
    train_data = pd.read_csv(
        data_folder + "/adult.data",
        names=columns,
        sep=" *, *",
        na_values="?",
        engine="python",
    ).dropna()
    test_data = pd.read_csv(
        data_folder + "/adult.test",
        names=columns,
        sep=" *, *",
        skiprows=1,
        na_values="?",
        engine="python",
    ).dropna()
    # NOTE(review): columns[::-1] keeps ALL columns (reversed), including the
    # "income" label — harmless since only numcols/catcols subsets are used below.
    X, Xt = train_data[columns[::-1]], test_data[columns[::-1]]
    # The test file spells labels with a trailing dot ("<=50K."), hence two mappings.
    Y = np.array([0 if s == "<=50K" else 1 for s in train_data["income"]])
    Yt = np.array([0 if s == "<=50K." else 1 for s in test_data["income"]])
    # numerical columns : standardize
    numcols = ["age", "education-num", "capital-gain", "capital-loss", "hours-per-week"]
    ss = StandardScaler()
    ss.fit(X[numcols])
    Xnum, Xtnum = ss.transform(X[numcols]), ss.transform(Xt[numcols])
    # categorical columns: apply 1-hot-encoding
    catcols = [
        "workClass",
        "marital-status",
        "occupation",
        "relationship",
        "race",
        "sex",
        "native-country",
    ]
    enc = OneHotEncoder()
    enc.fit(X[catcols])
    Xcat, Xtcat = (
        enc.transform(X[catcols]).toarray(),
        enc.transform(Xt[catcols]).toarray(),
    )
    X, Xt = np.concatenate((Xnum, Xcat), axis=1), np.concatenate((Xtnum, Xtcat), axis=1)
    # Reduce to 10 principal components (fit on the training split only).
    pca = PCA(n_components=10)
    pca.fit(X)
    X = pca.transform(X)
    Xt = pca.transform(Xt)
    # Append a constant column acting as the bias/offset feature.
    X = np.c_[X, np.ones(X.shape[0])]
    Xt = np.c_[Xt, np.ones(Xt.shape[0])]
    return X, Y, Xt, Yt
def read_phishing(data_folder, dnm="phishing"):
    r"""
    Returns the phishing dataset for logistic regression.

    Loads a cached local ``.npz`` when present; otherwise fetches it from the
    bayesian-coresets GitHub repository (without caching it to disk).

    :param data_folder: directory checked for the cached file
    :param dnm: dataset name used in both the filename and the URL
    :return: (X, y) numpy arrays
    """
    filename, urllink = (
        f"{data_folder}/{dnm}.npz",
        f"https://github.com/trevorcampbell/bayesian-coresets/blob/master/examples/data/{dnm}.npz?raw=true",
    )
    if not os.path.isfile(filename):
        response = requests.get(urllink)
        response.raise_for_status()
        data = np.load(BytesIO(response.content))
    else:
        data = np.load(filename)
    return data["X"], data["y"]
def read_webspam(data_folder, dnm="webspam"):
    r"""
    Returns the webspam dataset for logistic regression.

    Downloads the train/test svmlight files if missing, loads and preprocesses
    them, remaps labels from {-1, 1} to {0, 1} and caches everything in a
    ``webspam.npz`` archive (written to the current working directory).

    :param data_folder: directory where the raw svmlight files are stored
    :return: (X, y, Xt, yt) train/test inputs and 0/1 labels
    """
    import sklearn.datasets as skl_ds
    from sklearn import preprocessing
    import scipy.sparse as sp
    import numpy as np
    fnm_train, urllink_train = (
        f"{data_folder}/{dnm}_train.svm",
        "https://bitbucket.org/jhhuggins/lrcoresets/raw/cdcda24b5854ef380795ec11ab5321d0ec53c3fe/data/webspam_train.svm",
    )
    fnm_test, urllink_test = (
        f"{data_folder}/{dnm}_test.svm",
        "https://bitbucket.org/jhhuggins/lrcoresets/raw/cdcda24b5854ef380795ec11ab5321d0ec53c3fe/data/webspam_test.svm",
    )
    import urllib.request
    if not os.path.isfile(fnm_train):
        urllib.request.urlretrieve(urllink_train, fnm_train)
    if not os.path.isfile(fnm_test):
        urllib.request.urlretrieve(urllink_test, fnm_test)
    def _load_svmlight_data(path):
        # Thin wrapper around sklearn's svmlight loader.
        X, y = skl_ds.load_svmlight_file(path)
        return X, y
    def load_data(path, file_type, max_data=0, max_dim=0,
                  preprocess=True, include_offset=True):
        """Load data from a variety of file types.
        Parameters
        ----------
        path : string
            Data file path.
        file_type : string
            Supported file types are: 'svmlight', 'npy' (with the labels y in the
            rightmost col), 'npz', 'hdf5' (with datasets 'x' and 'y'), and 'csv'
            (with the labels y in the rightmost col)
        max_data : int
            If positive, maximum number of data points to use. If zero or negative,
            all data is used. Default is 0.
        max_dim : int
            If positive, maximum number of features to use. If zero or negative,
            all features are used. Default is 0.
        preprocess : boolean or Transformer, optional
            Flag indicating whether the data should be preprocessed. For sparse
            data, the features are scaled to [-1, 1]. For dense data, the features
            are scaled to have mean zero and variance one. Default is True.
        include_offset : boolean, optional
            Flag indicating that an offset feature should be added. Default is
            False.
        Returns
        -------
        X : array-like matrix, shape=(n_samples, n_features)
        y : int ndarray, shape=(n_samples,)
            Each entry indicates whether each example is negative (-1 value) or
            positive (+1 value)
        pp_obj : None or Transformer
            Transformer object used on data, or None if ``preprocess=False``
        """
        if not isinstance(path, str):
            raise ValueError("'path' must be a string")
        # NOTE(review): despite the docstring, only svmlight input is implemented.
        if file_type in ["svmlight", "svm"]:
            X, y = _load_svmlight_data(path)
        else:
            raise ValueError("unsupported file type, %s" % file_type)
        y_vals = set(y)
        if len(y_vals) != 2:
            raise ValueError('Only expected y to take on two values, but instead'
                             'takes on the values ' + ', '.join(y_vals))
        if 1.0 not in y_vals:
            raise ValueError('y does not take on 1.0 as one on of its values, but '
                             'instead takes on the values ' + ', '.join(y_vals))
        if -1.0 not in y_vals:
            # Coerce whatever the second label is to -1.0.
            y_vals.remove(1.0)
            print('converting y values of %s to -1.0' % y_vals.pop())
            y[y != 1.0] = -1.0
        if preprocess is False:
            pp_obj = None
        else:
            if preprocess is True:
                # Sparse data: scale to [-1, 1]; dense data: standardize.
                if sp.issparse(X):
                    pp_obj = preprocessing.MaxAbsScaler(copy=False)
                else:
                    pp_obj = preprocessing.StandardScaler(copy=False)
                pp_obj.fit(X)
            else:
                pp_obj = preprocess
            X = pp_obj.transform(X)
        if include_offset:
            X = preprocessing.add_dummy_feature(X)
            # NOTE(review): np.flip on a scipy sparse matrix raises — this line
            # presumably relies on X having been densified; confirm for very
            # sparse/high-dimensional inputs.
            X = np.flip(X, -1)  # move intercept to the last column of the array
        if sp.issparse(X) and (X.nnz > np.prod(X.shape) / 10 or X.shape[1] <= 20):
            print("X is either low-dimensional or not very sparse, so converting "
                  "to a numpy array")
            X = X.toarray()
        if isinstance(max_data, int) and max_data > 0 and max_data < X.shape[0]:
            X = X[:max_data,:]
            y = y[:max_data]
        if isinstance(max_dim, int) and max_dim > 0 and max_dim < X.shape[1]:
            X = X[:,:max_dim]
        return X, y, pp_obj
    X, y, _ = load_data(fnm_train, 'svm')
    # load testing data if it exists
    Xt, yt, _ = load_data(fnm_test, 'svm')
    # Remap labels {-1, 1} -> {0, 1} for the logistic regression pipeline.
    y[y==-1], yt[yt==-1] = 0, 0
    # NOTE(review): cache is written to the CWD, not data_folder — confirm intended.
    np.savez('webspam', X=X, y=y, Xt=Xt, yt=yt)
    return X, y, Xt, yt
def make_synthetic(num_datapoints=1000, D=2):
    r"""
    Generate a D-dimensional synthetic dataset for logistic regression.

    Inputs are standard-normal draws; labels in {-1, 1} are sampled from a
    logistic model with all weights equal to 5.
    """
    mean_vec = np.array([0] * D)
    cov_mat = np.eye(D)
    weights = np.array([5] * D)
    X = np.random.multivariate_normal(mean_vec, cov_mat, num_datapoints)
    probs = 1.0 / (1.0 + np.exp(-(X * weights).sum(axis=1)))
    y = np.where(np.random.rand(num_datapoints) <= probs, 1, -1).astype(int)
    return torch.from_numpy(X.astype(np.float32)), torch.from_numpy(y.astype(np.float32))
def read_dataset(dnm, method_args):
    r"""
    Returns one of the supported benchmark or synthetic dataset for the experiments in logistic regression, classification or regression via Bayesian nns

    Returns the tuple ``(x, y, xt, yt, N, D, train_dataset, test_dataset,
    num_classes)``; for MNIST the raw tensors x/y/xt/yt are None and only the
    torchvision datasets are populated.
    """
    # TBC: check if inference methods are compatible with the dataset and raise exceptions accordingly
    # NOTE(review): assumes method_args provides "data_folder" (file-backed
    # datasets) and "test_ratio" (synthetic splits) -- confirm with callers.
    if dnm != "MNIST": # UCI or synthetic datasets
        if dnm == "halfmoon":
            # Generate HalfMoon data
            (X, Y), num_classes = (
                make_moons(n_samples=1000, noise=0.1, random_state=42),
                2,
            )
            X, Y = torch.from_numpy(X.astype(np.float32)), torch.from_numpy(
                Y.astype(np.float32)
            )
        elif dnm == "four_blobs":
            # Generate synthetic multiclass data
            (X, Y), num_classes = make_four_class_dataset(N_K=250), 4
        elif dnm == "phishing":
            (X, Y), num_classes = read_phishing(method_args["data_folder"]), 2
            X, Y = torch.from_numpy(X.astype(np.float32)), torch.from_numpy(
                Y.astype(np.float32)
            )
        elif dnm == "adult":
            # adult/webspam readers already return a train/test split
            (x, y, xt, yt), num_classes = read_adult(method_args["data_folder"]), 2
            x, y, xt, yt = (
                torch.from_numpy(x.astype(np.float32)),
                torch.from_numpy(y.astype(np.float32)),
                torch.from_numpy(xt.astype(np.float32)),
                torch.from_numpy(yt.astype(np.float32)),
            )
        elif dnm == "webspam":
            (x, y, xt, yt), num_classes = read_webspam(method_args["data_folder"]), 2
            x, y, xt, yt = (
                torch.from_numpy(x.astype(np.float32)),
                torch.from_numpy(y.astype(np.float32)),
                torch.from_numpy(xt.astype(np.float32)),
                torch.from_numpy(yt.astype(np.float32)),
            )
        elif dnm.startswith("synth_lr"):
            # Input dimensionality is encoded in the name, e.g. "synth_lr_10".
            (X, Y), num_classes = make_synthetic(D=int(dnm.split('_')[-1]), num_datapoints=1000), 2
        # NOTE(review): an unrecognised dnm falls through with X/Y undefined
        # and raises NameError below -- consider validating dnm upfront.
        if dnm.startswith(("halfmoon", "four_blobs", "phishing", "synth_lr")): # split into train / test data
            Y[Y == -1] = 0  # remap labels from {-1, 1} to {0, 1}
            test_size = int(method_args["test_ratio"] * X.shape[0])
            x, y, xt, yt = (
                X[:-test_size],
                Y[:-test_size],
                X[-test_size:],
                Y[-test_size:],
            )
        N, D = x.shape
        (train_dataset, test_dataset) = (
            (SynthDataset(x, y), SynthDataset(xt, yt))
            if dnm.startswith(("halfmoon", "four_blobs", "phishing", "synth_lr", "webspam", "adult"))
            else (None, None)
        )
    else:
        # MNIST via torchvision; resize when the model's expected input size
        # differs from the stored image size.
        _, input_size, num_classes, normalization, _ = get_torchvision_info(dnm)
        real_size = dataset_stats[dnm].real_size
        N, D = 60000, input_size
        if input_size != real_size:
            transform_list = [
                torchvision.transforms.Resize([input_size, input_size], Image.BICUBIC)
            ]
        else:
            transform_list = []
        transform_list += [
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(*normalization),
        ]
        with suppress_stdout():
            train_dataset, test_dataset = torchvision.datasets.MNIST(
                root=method_args["data_folder"],
                download=True,
                train=True,
                transform=torchvision.transforms.Compose(transform_list),
            ), torchvision.datasets.MNIST(
                root=method_args["data_folder"],
                download=True,
                train=False,
                transform=torchvision.transforms.Compose(transform_list),
            )
        # Raw tensors are not materialized for MNIST; use the datasets instead.
        x, y, xt, yt = None, None, None, None
    return x, y, xt, yt, N, D, train_dataset, test_dataset, num_classes
from json.decoder import JSONDecodeError
def update_hyperparams_dict(dnm, best_tau, fnm='psvi/data/opt_regr_hyperparams.json'):
    """Persist the best regression hyperparameter for dataset *dnm* (stub).

    Currently a no-op: the JSON read/update logic below was disabled and is
    kept only as a sketch of the intended behaviour.
    """
    # Disabled sketch (previously kept as an inert string literal):
    #     with open(fnm, "a+") as f:
    #         try:
    #             opt_taus = json.loads(f)
    #         except JSONDecodeError:
    #             opt_taus = {"init": 0}
    #             json.dumps(opt_taus, f)
    #         opt_taus = json.load(f)
    #         opt_taus[dnm] = opt_taus.get(dnm, best_tau)
    return None
|
Blackbox-Coresets-VI-main
|
psvi/experiments/experiments_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
Blackbox-Coresets-VI-main
|
psvi/models/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import operator as op
from functools import reduce
import numpy as np
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
def gaussian_fn(loc=None, scale=None):
    """Build a univariate Normal distribution with the given mean and stddev."""
    return dist.normal.Normal(loc=loc, scale=scale)
def categorical_fn(logits=None, probs=None):
    """Build a Categorical distribution from *logits* or *probs* (exactly one)."""
    return dist.categorical.Categorical(logits=logits, probs=probs)
def set_mc_samples(net, mc_samples):
    """Set the Monte Carlo sample count on every variational layer of *net*."""
    for layer in net.modules():
        if isinstance(layer, VIMixin):
            layer.mc_samples = mc_samples
def inverse_softplus(x):
    """Return log(exp(x) - 1), the inverse of softplus.

    Torch tensors stay in torch; anything else goes through numpy.
    """
    if not torch.is_tensor(x):
        return np.log(np.expm1(x))
    return x.expm1().log()
def prod(a):
    """Return the product of the elements of *a*.

    Uses 1 as the initial (multiplicative identity) value, so an empty
    iterable yields 1 instead of raising TypeError -- matching the numel of
    a zero-dimensional parameter shape.
    """
    return reduce(op.mul, a, 1)
def deep_getattr(obj, name):
    """Resolve a dotted attribute path, e.g. ``deep_getattr(m, "a.b.c")``."""
    target = obj
    for part in name.split("."):
        target = getattr(target, part)
    return target
def deep_delattr(obj, name):
    """Delete the attribute at a dotted path, e.g. ``deep_delattr(m, "a.b")``."""
    *parents, leaf = name.split(".")
    for part in parents:
        obj = getattr(obj, part)
    delattr(obj, leaf)
def deep_setattr(obj, name, value):
    """Set the attribute at a dotted path, e.g. ``deep_setattr(m, "a.b", v)``."""
    *parents, leaf = name.split(".")
    for part in parents:
        obj = getattr(obj, part)
    setattr(obj, leaf, value)
class VIMixin(nn.Module):
    """Mean-field Gaussian variational mixin for torch layers.

    Combined with a concrete layer (e.g. ``nn.Linear``, ``nn.Conv2d``), the
    layer's ``weight``/``bias`` act as posterior means while learned,
    softplus-parameterized standard deviations define the posterior spread.
    Samples drawn during ``forward`` are cached so KL terms can be estimated
    at the very same samples (``sampled_nkl``).
    """

    def __init__(self, *args, init_sd=0.01, prior_sd=1.0, mc_samples=1, **kwargs):
        super().__init__(*args, **kwargs)
        # Standard deviations are stored on an unconstrained scale; softplus
        # maps them back to the positive reals (see weight_sd / bias_sd).
        self._weight_sd = nn.Parameter(
            inverse_softplus(torch.full_like(self.weight, init_sd))
        )
        if self.bias is not None:
            # Bug fix: the original wrapped this tensor in nn.Parameter twice;
            # a single wrap is sufficient (and intended).
            self._bias_sd = nn.Parameter(
                inverse_softplus(torch.full_like(self.bias, init_sd))
            )
        else:
            self.register_parameter("_bias_sd", None)
        self.prior_sd = prior_sd
        self.mc_samples = mc_samples
        self._cached_weight = None
        self._cached_bias = None
        self._init_sd = init_sd
        self.reset_parameters_variational()

    def reset_parameters_variational(self) -> None:
        """Re-initialize means via the base layer; reset sds and sample caches."""
        super().reset_parameters()  # pyre-ignore
        self._weight_sd.data.copy_(
            inverse_softplus(torch.full_like(self.weight, self._init_sd))
        )
        if self.bias is not None:
            self._bias_sd.data.copy_(
                inverse_softplus(torch.full_like(self.bias, self._init_sd))
            )
        self._cached_weight = None
        self._cached_bias = None

    def kl(self):
        """Exact KL(q || p) for the weights (and bias, when present)."""
        w_kl = dist.kl_divergence(self.weight_dist, self.prior_weight_dist)
        b_kl = (
            dist.kl_divergence(self.bias_dist, self.prior_bias_dist)
            if self.bias is not None
            else 0.0
        )
        return w_kl + b_kl

    def sampled_nkl(self):
        """Monte Carlo negative KL, evaluated at the cached forward samples."""
        w = self._cached_weight
        w_kl = self.prior_weight_dist.log_prob(w) - self.weight_dist.log_prob(w)
        if self.bias is None:
            # Robustness fix: the original assumed a bias was always present.
            return w_kl
        b = self._cached_bias.squeeze(1) if self.mc_samples > 1 else self._cached_bias
        b_kl = self.prior_bias_dist.log_prob(b) - self.bias_dist.log_prob(b)
        return w_kl + b_kl

    @property
    def weight_dist(self):
        """Posterior over weights: fully factorized Normal(mean, sd)."""
        return dist.Independent(
            dist.Normal(self.weight, self.weight_sd), self.weight.ndim
        )

    @property
    def prior_weight_dist(self):
        """Prior over weights: fully factorized Normal(0, prior_sd)."""
        return dist.Independent(
            dist.Normal(torch.zeros_like(self.weight), self.prior_sd), self.weight.ndim
        )

    @property
    def weight_sd(self):
        return F.softplus(self._weight_sd)

    @property
    def bias_dist(self):
        """Posterior over the bias, or None when the layer has no bias."""
        if self.bias is not None:
            return dist.Independent(
                dist.Normal(self.bias, self.bias_sd), self.bias.ndim
            )
        return None

    @property
    def prior_bias_dist(self):
        """Prior over the bias, or None when the layer has no bias."""
        if self.bias is not None:
            return dist.Independent(
                dist.Normal(torch.zeros_like(self.bias), self.prior_sd), self.bias.ndim
            )
        return None

    @property
    def bias_sd(self):
        if self.bias is not None:
            return F.softplus(self._bias_sd)
        return None

    def rsample(self):
        """Draw reparameterized samples of (weight, bias)."""
        weight = self.weight_dist.rsample(self.weight_batch_shape)
        bias = (
            self.bias_dist.rsample(self.bias_batch_shape)
            if self.bias is not None
            else None
        )
        return weight, bias

    @property
    def weight_batch_shape(self):
        # Leading sample dim only when more than one MC sample is requested.
        return torch.Size((self.mc_samples,) if self.mc_samples > 1 else ())

    @property
    def bias_batch_shape(self):
        # Extra singleton dim lets the bias broadcast against (S, N, ...) activations.
        return torch.Size((self.mc_samples, 1) if self.mc_samples > 1 else ())

    def extra_repr(self):
        return f"{super().extra_repr()}, mc_samples={self.mc_samples}"
class VILinear(VIMixin, nn.Linear):
    """Linear layer whose weight and bias are sampled variationally."""

    def forward(self, x):
        # Draw fresh samples and cache them for later KL estimation.
        weight, bias = self.rsample()
        self._cached_weight, self._cached_bias = weight, bias
        return x.matmul(weight.transpose(-2, -1)) + bias
"""
class ResNet(torch.nn.Module):
def __init__(self, module, skip_connection):
super().__init__()
self.module = module
self.skip_connection = skip_connection
def forward(self, x):
return self.module(x) + self.skip_connection(x)
"""
class VIConv2d(VIMixin, nn.Conv2d):
    """2-D convolution with variational (sampled) weights.

    Monte Carlo samples are parallelized through the ``groups`` argument of
    ``F.conv2d``, which is why user-supplied ``groups`` are rejected.
    """

    def __init__(self, *args, **kwargs):
        if "groups" in kwargs:
            raise ValueError(
                "Cannot use groups argument for variational conv layer as this is used for parallelizing across samples."
            )
        super().__init__(*args, **kwargs)
    def forward(self, x):
        # x: S x N x C x H x W
        # or
        # x: N x C x H x W
        # reshape to: N x SC x H x W
        # so that when we convolve with
        # w: SK x C x h x w
        # we get an output with shape
        # N x SK x H' x W'
        # that we reshape to
        # S x N x K x H' x W'
        if self.mc_samples > 1:
            if x.ndim == 4:
                # Same input is replicated along channels for every MC sample.
                x = x.repeat(1, self.mc_samples, 1, 1)
            else:
                x = x.transpose(0, 1).flatten(1, 2)
        self._cached_weight, self._cached_bias = self.rsample()
        w = (
            self._cached_weight.flatten(0, 1)
            if self.mc_samples > 1
            else self._cached_weight
        )
        # NOTE(review): assumes the layer has a bias; with bias=False this
        # flatten() on None would fail -- confirm construction sites.
        b = self._cached_bias.flatten()
        a = F.conv2d(
            x,
            w,
            b,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.mc_samples,
        )
        if self.mc_samples > 1:
            # Split SK back into (S, K) and move the sample dim to the front.
            return a.view(
                -1, self.mc_samples, self.out_channels, *a.shape[-2:]
            ).transpose(0, 1)
        return a
class BatchMaxPool2d(nn.MaxPool2d):
    """MaxPool2d that also accepts a leading Monte Carlo sample dimension.

    Standard ``(N, C, H, W)`` input takes the fast path; ``(S, N, C, H, W)``
    input has its first two dims flattened for pooling and restored after.
    """

    def forward(self, x):
        # Bug fix: the original tested ``x.shape == 4`` (a torch.Size compared
        # to an int, always False); the intended fast path is 4-D input.
        if x.ndim == 4:
            return super().forward(x)
        d0, d1 = x.shape[:2]
        x = super().forward(x.flatten(0, 1))
        return x.view(d0, d1, *x.shape[1:])
def make_fcnet(
    in_dim,
    h_dim,
    out_dim,
    n_layers=2,
    linear_class=None,
    nonl_class=None,
    mc_samples=4,
    residual=False,
    **kwargs,
):
    """Build a fully connected classifier with ``n_layers`` hidden blocks.

    Each hidden block is ``linear_class`` (default: VILinear) followed by
    ``nonl_class`` (default: ReLU); a final ``classifier`` layer maps to
    ``out_dim``. ``mc_samples`` is stamped onto every module.
    """
    linear_class = VILinear if linear_class is None else linear_class
    nonl_class = nn.ReLU if nonl_class is None else nonl_class
    net = nn.Sequential()
    width = in_dim
    for idx in range(n_layers):
        net.add_module(f"lin{idx}", linear_class(width, h_dim, **kwargs))
        net.add_module(f"nonl{idx}", nonl_class())
        width = h_dim
    # Residual / skip-connection variant intentionally left disabled.
    net.add_module("classifier", linear_class(h_dim, out_dim, **kwargs))
    for module in net.modules():
        module.mc_samples = mc_samples
    return net
def make_regressor_net(
    in_dim,
    h_dim,
    out_dim=1,
    n_layers=2,
    linear_class=None,
    nonl_class=None,
    mc_samples=4,
    residual=False,
    **kwargs,
):
    """Build a fully connected regression net with ``n_layers`` hidden blocks.

    Mirrors ``make_fcnet`` but ends in a ``regressor`` head of width
    ``out_dim`` (default 1). ``mc_samples`` is stamped onto every module.
    """
    linear_class = VILinear if linear_class is None else linear_class
    nonl_class = nn.ReLU if nonl_class is None else nonl_class
    net = nn.Sequential()
    width = in_dim
    for idx in range(n_layers):
        net.add_module(f"lin{idx}", linear_class(width, h_dim, **kwargs))
        net.add_module(f"nonl{idx}", nonl_class())
        width = h_dim
    # Residual / skip-connection variant intentionally left disabled.
    net.add_module("regressor", linear_class(h_dim, out_dim, **kwargs))
    for module in net.modules():
        module.mc_samples = mc_samples
    return net
def make_lenet(
    conv_class=None, linear_class=None, pool_class=None, nonl_class=None, **kwargs
):
    """Build a LeNet-5-style network (sized for 1x28x28 inputs).

    Layer classes default to their variational versions; ``**kwargs`` are
    forwarded to all conv/linear layers except the final 84->10 head.
    """
    conv_class = VIConv2d if conv_class is None else conv_class
    linear_class = VILinear if linear_class is None else linear_class
    pool_class = BatchMaxPool2d if pool_class is None else pool_class
    nonl_class = nn.ReLU if nonl_class is None else nonl_class
    layers = [
        conv_class(1, 6, 5, padding=2, **kwargs),
        nonl_class(),
        pool_class(2, 2),
        conv_class(6, 16, 5, padding=0, **kwargs),
        nonl_class(),
        pool_class(2, 2),
        nn.Flatten(-3, -1),
        linear_class(400, 120, **kwargs),
        nonl_class(),
        linear_class(120, 84, **kwargs),
        nonl_class(),
        # Final head deliberately receives no **kwargs (as in the original).
        linear_class(84, 10),
    ]
    return nn.Sequential(*layers)
def make_alexnet(
    conv_class=None,
    linear_class=None,
    pool_class=None,
    nonl_class=None,
    local_response_norm_class=None,
    **kwargs,
):
    """Build a small AlexNet-style network (sized for 3x32x32 inputs).

    Layer classes default to their variational versions. ``**kwargs`` are
    forwarded only to the first two linear layers (so e.g. mc_samples is
    passed through correctly); conv layers and the final head take none.
    """
    conv_class = VIConv2d if conv_class is None else conv_class
    linear_class = VILinear if linear_class is None else linear_class
    pool_class = BatchMaxPool2d if pool_class is None else pool_class
    nonl_class = nn.ReLU if nonl_class is None else nonl_class
    local_response_norm_class = (
        nn.LocalResponseNorm
        if local_response_norm_class is None
        else local_response_norm_class
    )
    layers = [
        conv_class(3, 64, 5, stride=1, padding=2),
        nonl_class(),
        pool_class(kernel_size=3, stride=2, padding=1),
        local_response_norm_class(4, alpha=0.001 / 9.0, beta=0.75, k=1),
        conv_class(64, 64, kernel_size=5, padding=2, stride=1),
        nonl_class(),
        local_response_norm_class(4, alpha=0.001 / 9.0, beta=0.75, k=1),
        pool_class(kernel_size=3, stride=2, padding=1),
        nn.Flatten(-3, -1),
        # add kwargs so that mc_samples arg gets correctly passed
        linear_class(4096, 384, **kwargs),
        nonl_class(),
        linear_class(384, 192, **kwargs),
        nonl_class(),
        linear_class(192, 10),
    ]
    return nn.Sequential(*layers)
class network(torch.nn.Module):
    """Small upsample-and-convolve head (963 -> 128 -> 64 -> 2 channels).

    Only the layers are declared here; no ``forward`` is defined.
    """

    def __init__(self, **kwargs):
        # Bug fix: nn.Module.__init__ must run before submodules are assigned,
        # otherwise attribute registration raises AttributeError.
        super().__init__()
        self.upscale = nn.Upsample(scale_factor=2, mode="bilinear")
        self.conv1 = nn.Conv2d(963, 128, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 2, kernel_size=3, padding=1)
class MultivariateNormalVIMixin(nn.Module):
    """Variational mixin with a full-covariance Gaussian over ALL parameters.

    On construction the wrapped layer's parameters are removed and replaced
    by one flattened mean vector plus a Cholesky-parameterized covariance;
    samples are written back as plain attributes via ``deep_setattr``.
    """
    def __init__(self, *args, init_sd=0.01, prior_sd=1., mc_samples=1, **kwargs):
        super().__init__(*args, **kwargs)
        self.mc_samples = mc_samples
        self.prior_sd = prior_sd
        self.param_names = []
        self.param_shapes = []
        for n, p in list(self.named_parameters()):
            self.param_names.append(n)
            self.param_shapes.append(p.shape)
            deep_delattr(self, n)  # parameter becomes a sampled attribute
        self.param_numels = list(map(prod, self.param_shapes))
        n = sum(self.param_numels)
        # NOTE(review): `p` below is the last parameter from the loop, used
        # only as a dtype/device template.
        self.mean = nn.Parameter(p.new_zeros(n))
        self._sd = nn.Parameter(inverse_softplus(p.new_full((n,), init_sd)))
        # NOTE(review): this counts the strict lower triangle of an
        # (n-1)x(n-1) matrix, so the last row of scale_tril gets no
        # correlation parameters -- confirm this truncation is intended.
        n_corr = torch.tril_indices(n - 1, n - 1, offset=-1)[0].numel()
        self._corr = nn.Parameter(p.new_zeros(n_corr))
        self.num_params = n
    def reset_parameters_variational(self) -> None:
        # Deliberately unsupported for the full-covariance posterior.
        raise NotImplementedError
    def kl(self):
        """Exact KL between the full-covariance posterior and the prior."""
        return dist.kl_divergence(self.param_dist, self.prior_dist)
    def sampled_nkl(self):
        """Monte Carlo negative KL at the cached samples set by cached_rsample."""
        x = torch.cat(
            [deep_getattr(self, n).flatten(1) for n in self.param_names], dim=1
        )
        return self.prior_dist.log_prob(x) - self.param_dist.log_prob(x)
    '''
    def sampled_nkl(self):
        w = self._cached_weight
        w_kl = self.prior_weight_dist.log_prob(w) - self.weight_dist.log_prob(w)
        b = self._cached_bias.squeeze(1) if self.mc_samples > 1 else self._cached_bias
        b_kl = self.prior_bias_dist.log_prob(b) - self.bias_dist.log_prob(b)
        return w_kl + b_kl
    '''
    @property
    def scale_tril(self):
        """Lower-triangular Cholesky factor: softplus sds on the diagonal,
        free correlation parameters below it."""
        k = self.mean.new_zeros(self.num_params, self.num_params)
        k[torch.arange(self.num_params), torch.arange(self.num_params)] = F.softplus(
            self._sd
        )
        d = self.mean.size(-1) - 1
        i = torch.tril_indices(d, d, offset=-1)
        k[i[0], i[1]] = self._corr
        return k
    @property
    def param_dist(self):
        return dist.MultivariateNormal(self.mean, scale_tril=self.scale_tril)
    def rsample(self):
        """Draw mc_samples joint samples, split back into per-parameter shapes."""
        x = self.param_dist.rsample((self.mc_samples,))
        return [
            xx.reshape(self.mc_samples, *shape)
            for xx, shape in zip(x.split(self.param_numels, dim=-1), self.param_shapes)
        ]
    def cached_rsample(self):
        # Write fresh samples back onto the module as plain attributes.
        for name, sample in zip(self.param_names, self.rsample()):
            deep_setattr(self, name, sample)
    @property
    def prior_dist(self):
        """Isotropic Normal(0, prior_sd) prior over the flattened parameters."""
        m = torch.zeros_like(self.mean)
        sd = torch.full_like(self.mean, self.prior_sd).diag_embed()
        return dist.MultivariateNormal(m, scale_tril=sd)
class VILinearMultivariateNormal(MultivariateNormalVIMixin, nn.Linear):
    """Linear layer whose parameters share a full-covariance Gaussian posterior."""

    def forward(self, x, **kwargs):
        # Sample parameters; cached_rsample re-attaches weight/bias attributes.
        self.cached_rsample()
        out = x.matmul(self.weight.transpose(-1, -2))
        if self.bias is None:
            return out
        return out + self.bias.unsqueeze(-2)
def make_fc2net(
    in_dim,
    h_dim,
    out_dim,
    n_layers=2,
    linear_class=None,
    nonl_class=None,
    mc_samples=4,
    residual=False,
    **kwargs,
):
    """Build a fully connected net with full-covariance variational layers.

    Same layout as ``make_fcnet`` (lin{i}/nonl{i}/classifier) but the default
    layer type is ``VILinearMultivariateNormal``, and ``mc_samples`` is passed
    to each layer constructor as well as stamped on every module.
    """
    linear_class = VILinearMultivariateNormal if linear_class is None else linear_class
    nonl_class = nn.ReLU if nonl_class is None else nonl_class
    net = nn.Sequential()
    width = in_dim
    for idx in range(n_layers):
        net.add_module(
            f"lin{idx}", linear_class(width, h_dim, mc_samples=mc_samples, **kwargs)
        )
        net.add_module(f"nonl{idx}", nonl_class())
        width = h_dim
    # Residual / skip-connection variant intentionally left disabled.
    net.add_module(
        "classifier", linear_class(h_dim, out_dim, mc_samples=mc_samples, **kwargs)
    )
    for module in net.modules():
        module.mc_samples = mc_samples
    return net
|
Blackbox-Coresets-VI-main
|
psvi/models/neural_net.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import stan
import torch
from torch.distributions.normal import Normal
def logreg_forward(thetas, x):
    """Average predicted probability over parameter samples.

    thetas: (S, D) matrix of parameter samples; x: (N, D) inputs.
    Returns the (N,) mean sigmoid score across samples.
    """
    per_sample_probs = torch.sigmoid(x.matmul(thetas.T))
    return per_sample_probs.mean(axis=1).squeeze()
def model(thetas, mu0, sigma0, x, y, single=False):
    """Bernoulli log-likelihoods and Gaussian log-prior for logistic regression.

    Returns ``(loglik, prior_val)`` where ``loglik`` is the negated
    BCE-with-logits loss per datapoint and ``prior_val`` is the summed
    N(mu0, sigma0) log-density of ``thetas``. With ``single=True`` thetas is
    one parameter vector; otherwise each row is an independent sample.
    """
    bce = torch.nn.BCEWithLogitsLoss(reduction="none")
    prior_val = Normal(mu0, sigma0).log_prob(thetas).sum()
    if single:
        return -bce(x @ thetas, y), prior_val
    logits = x.matmul(thetas.T).squeeze()
    targets = y.repeat(thetas.shape[0], 1).T
    return -bce(logits, targets), prior_val
def prior(D):
    """Standard-normal prior moments for D weights plus one bias term."""
    weight_loc = torch.zeros(D)
    weight_scale = torch.ones(D)
    bias_loc = torch.zeros(1)
    bias_scale = torch.ones(1)
    return weight_loc, weight_scale, bias_loc, bias_scale
def inverse_softplus(x):
    """Map y = softplus(x) back to x, i.e. log(expm1(y))."""
    return x.expm1().log() if torch.is_tensor(x) else np.log(np.expm1(x))
# Stan model used for coreset posterior sampling in the original Sparse VI implementation
# Weighted Bernoulli-logit regression: standard-normal priors on the intercept
# and weights; each observation's log-likelihood is scaled by its coreset
# weight w[i].
stan_representation = """
data {
int<lower=0> d; // 1 + dimensionality of x
int<lower=0> n; // number of observations
matrix[n,d] x; // inputs
int<lower=0,upper=1> y[n]; // outputs in {0, 1}
vector[n] w; // weights
}
parameters {
real theta0; // intercept
vector[d] theta; // logreg params
}
model {
theta0 ~ normal(0, 1);
theta ~ normal(0, 1);
for(i in 1:n){
target += w[i]*bernoulli_logit_lpmf(y[i]| theta0 + x[i]*theta);
}
}
"""
def mcmc_sample(sml, core_idcs, x, y, w, N_per=2000, seed=42, n_samples=5):
    """Draw weighted-logistic-regression posterior samples with Stan.

    Builds the ``stan_representation`` model on the coreset points indexed by
    ``core_idcs`` with weights ``w``, runs one HMC chain, and returns the last
    ``n_samples`` draws stacked as rows of (theta, theta0).
    """
    # NOTE(review): the incoming `sml` argument is immediately overwritten by
    # stan.build below -- presumably a leftover; verify callers.
    np.random.seed(seed=seed)
    torch.manual_seed(seed)
    sampler_data = {
        "x": x[core_idcs, :].detach().cpu().numpy(),
        "y": y[core_idcs].detach().cpu().numpy().astype(int),
        "d": x.shape[1],
        "n": len(core_idcs),
        "w": w[core_idcs].detach().cpu().numpy(),
    }
    sml = stan.build(stan_representation, data=sampler_data, seed=seed)
    # NOTE(review): the `control` kwarg is pystan2-style; confirm it is
    # accepted by the installed `stan` (pystan3) sample() API.
    sampling_output = sml.sample(
        num_samples=N_per,
        chains=1,
        control={"adapt_delta": 0.9, "max_treedepth": 15},
        verbose=False,
    )[:, -n_samples:]
    # Stack weight draws and append the intercept as the last column.
    param_samples = torch.cat(
        (
            torch.tensor([d["theta"] for d in sampling_output]),
            torch.tensor([d["theta0"] for d in sampling_output]).unsqueeze(axis=1),
        ),
        axis=1,
    )
    return param_samples
def laplace_precision(z_core, theta, w, diagonal=False):
    """Laplace-approximation precision of a weighted logistic posterior.

    Only points with strictly positive weight contribute. With
    ``diagonal=True`` just the diagonal of the precision is returned;
    otherwise the full matrix (identity prior Hessian plus the weighted
    Gauss-Newton term) is returned.
    """
    with torch.no_grad():
        active = w > 0
        logits = (z_core @ theta)[active]
        probs = logits.sigmoid()
        curvature = probs * (1 - probs) * w[active]
        scaled = z_core[active].T * curvature.sqrt()
        if diagonal:
            return scaled.pow(2).sum(1) + 1
        return torch.eye(z_core.shape[1]) + scaled.matmul(scaled.T)
|
Blackbox-Coresets-VI-main
|
psvi/models/logreg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
"""from https://github.com/lrjconan/RBP/blob/9c6e68d1a7e61b1f4c06414fae04aeb43c8527cb/utils/model_helper.py"""
import torch
def cg(Ax, b, max_iter=100, epsilon=1.0e-5):
    """Solve A x = b with the conjugate-gradient method.

    Args:
        Ax: linear operator; maps a list of tensors to a list of tensors.
        b: right-hand side, as a list of tensors.
        max_iter: iteration cap.
        epsilon: stop once the new residual's norm falls below this.
    Returns:
        The iterate from the step *before* the stopping test fired (this
        matches the reference RBP implementation, which breaks before
        committing the freshly computed iterate).
    """

    def as_vector(tensors):
        # Inlined flattening helper (was cat_list_to_tensor).
        return torch.cat([t.view([-1]) for t in tensors])

    x_prev = [torch.zeros_like(bb) for bb in b]
    r_prev = [torch.zeros_like(bb).copy_(bb) for bb in b]
    p_prev = [torch.zeros_like(rr).copy_(rr) for rr in r_prev]
    for _ in range(max_iter):
        Ap = Ax(p_prev)
        rTr = torch.sum(as_vector(r_prev) * as_vector(r_prev))
        pAp = torch.sum(as_vector(p_prev) * as_vector(Ap))
        alpha = rTr / pAp
        x_next = [xx + alpha * pp for xx, pp in zip(x_prev, p_prev)]
        r_next = [rr - alpha * app for rr, app in zip(r_prev, Ap)]
        r_next_vec = as_vector(r_next)
        if float(torch.norm(r_next_vec)) < epsilon:
            break
        beta = torch.sum(r_next_vec * r_next_vec) / rTr
        p_prev = [rr + beta * pp for rr, pp in zip(r_next, p_prev)]
        x_prev = x_next
        r_prev = r_next
    return x_prev
def cat_list_to_tensor(list_tx):
    """Flatten each tensor in the list and concatenate into one 1-D tensor."""
    flat_parts = [xx.view([-1]) for xx in list_tx]
    return torch.cat(flat_parts)
|
Blackbox-Coresets-VI-main
|
psvi/hypergrad/CG_torch.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
from itertools import repeat
import torch
class DifferentiableOptimizer:
    """Base class for optimizers whose update steps stay in the autograd graph."""

    def __init__(self, loss_f, dim_mult, data_or_iter=None):
        """
        Args:
            loss_f: callable with signature (params, hparams, [data optional]) -> loss tensor
            dim_mult: number of state-tensor groups tracked per parameter
            data_or_iter: (x, y) or iterator over the data needed for loss_f
        """
        if data_or_iter:
            self.data_iterator = (
                data_or_iter
                if hasattr(data_or_iter, "__next__")
                else repeat(data_or_iter)
            )
        else:
            self.data_iterator = None
        self.loss_f = loss_f
        self.dim_mult = dim_mult
        self.curr_loss = None

    def get_opt_params(self, params):
        """Initial optimizer state: the params plus zero buffers per extra group."""
        state = list(params)
        for _ in range(self.dim_mult - 1):
            state.extend(torch.zeros_like(p) for p in params)
        return state

    def step(self, params, hparams, create_graph):
        raise NotImplementedError

    def __call__(self, params, hparams, create_graph=True):
        # Grad must stay enabled even under torch.no_grad() callers.
        with torch.enable_grad():
            return self.step(params, hparams, create_graph)

    def get_loss(self, params, hparams):
        """Evaluate and cache the inner loss, feeding data when an iterator was given."""
        if self.data_iterator:
            batch = next(self.data_iterator)
            self.curr_loss = self.loss_f(params, hparams, batch)
        else:
            self.curr_loss = self.loss_f(params, hparams)
        return self.curr_loss
class GradientDescent(DifferentiableOptimizer):
    """Plain differentiable gradient descent (one step per call)."""

    def __init__(self, loss_f, step_size, data_or_iter=None):
        super(GradientDescent, self).__init__(
            loss_f, dim_mult=1, data_or_iter=data_or_iter
        )
        # Accept either a constant step size or a schedule over hparams.
        self.step_size_f = step_size if callable(step_size) else lambda x: step_size

    def step(self, params, hparams, create_graph):
        inner_loss = self.get_loss(params, hparams)
        lr = self.step_size_f(hparams)
        return gd_step(params, inner_loss, lr, create_graph=create_graph)
class HeavyBall(DifferentiableOptimizer):
    """Differentiable heavy-ball (Polyak momentum) optimizer."""

    def __init__(self, loss_f, step_size, momentum, data_or_iter=None):
        super(HeavyBall, self).__init__(loss_f, dim_mult=2, data_or_iter=data_or_iter)
        self.loss_f = loss_f
        self.step_size_f = step_size if callable(step_size) else lambda x: step_size
        self.momentum_f = momentum if callable(momentum) else lambda x: momentum

    def step(self, params, hparams, create_graph):
        # State layout: first half current params, second half previous params.
        half = len(params) // 2
        current, previous = params[:half], params[half:]
        inner_loss = self.get_loss(current, hparams)
        lr, mom = self.step_size_f(hparams), self.momentum_f(hparams)
        stepped, carried = heavy_ball_step(
            current, previous, inner_loss, lr, mom, create_graph=create_graph
        )
        return [*stepped, *carried]
class Momentum(DifferentiableOptimizer):
    r"""
    GD with momentum step as implemented in torch.optim.SGD
    .. math::
          v_{t+1} = \mu * v_{t} + g_{t+1} \\
          p_{t+1} = p_{t} - lr * v_{t+1}
    """

    def __init__(self, loss_f, step_size, momentum=0.9, data_or_iter=None):
        super(Momentum, self).__init__(loss_f, dim_mult=2, data_or_iter=data_or_iter)
        self.loss_f = loss_f
        self.step_size_f = step_size if callable(step_size) else lambda x: step_size
        self.momentum_f = momentum if callable(momentum) else lambda x: momentum

    def step(self, params, hparams, create_graph):
        # State layout: first half params, second half velocity buffers.
        half = len(params) // 2
        weights, velocities = params[:half], params[half:]
        inner_loss = self.get_loss(weights, hparams)
        lr, mom = self.step_size_f(hparams), self.momentum_f(hparams)
        stepped, new_velocities = torch_momentum_step(
            weights, velocities, inner_loss, lr, mom, create_graph=create_graph
        )
        return [*stepped, *new_velocities]
class DifferentiableAdam(DifferentiableOptimizer):
    """
    DifferentiableAdam optimizer as implemented in torch.optim.Adam
    .. math::
          m_{t+1} = beta_1 * m_{t} + (1 - beta1) * g_{t}
          u_{t+1} = beta_2 * u_{t} + (1 - beta2) * g_{t}^2
          mh_{t+1} = mh_{t+1} / (1 - beta1**t)
          uh_{t+1} = uh_{t+1} / (1 - beta2**t)
          p_{t+1} = p_{t} - lr * mh_{t+1} / (sqrt(uh_{t+1} + eps))
    """

    def __init__(
        self,
        loss_f,
        step_size,
        data_or_iter=None,
        betas=(0.9, 0.999),
        eps=1e-8,
        step_cnt=1,
    ):
        super(DifferentiableAdam, self).__init__(
            loss_f, dim_mult=3, data_or_iter=data_or_iter
        )
        self.step_size_f = step_size if callable(step_size) else lambda x: step_size
        self.beta1, self.beta2 = betas
        self.eps = eps
        self.step_cnt = step_cnt

    def step(self, params, hparams, create_graph):
        # State layout: weights, then first moments, then second moments.
        group = len(params) // 3
        weights = params[:group]
        first_moments = params[group : 2 * group]
        second_moments = params[2 * group :]
        inner_loss = self.get_loss(weights, hparams)
        lr = self.step_size_f(hparams)
        new_w, new_m, new_u = adam_step(
            weights,
            first_moments,
            second_moments,
            inner_loss,
            lr,
            self.step_cnt,
            self.beta1,
            self.beta2,
            self.eps,
            create_graph=create_graph,
        )
        self.step_cnt += 1
        return [*new_w, *new_m, *new_u]
def gd_step(params, loss, step_size, create_graph=True):
    """Return params after a single gradient-descent update on *loss*."""
    grads = torch.autograd.grad(loss, params, create_graph=create_graph)
    updated = []
    for w, g in zip(params, grads):
        updated.append(w - step_size * g)
    return updated
def heavy_ball_step(params, aux_params, loss, step_size, momentum, create_graph=True):
    """One heavy-ball update; returns (new_params, params-as-new-aux)."""
    grads = torch.autograd.grad(loss, params, create_graph=create_graph)
    updated = []
    for grad, w_cur, w_prev in zip(grads, params, aux_params):
        updated.append(w_cur - step_size * grad + momentum * (w_cur - w_prev))
    return updated, params
def torch_momentum_step(
    params, aux_params, loss, step_size, momentum=0.9, create_graph=True
):
    r"""
    GD with momentum step as implemented in torch.optim.SGD
    .. math::
          v_{t+1} = \mu * v_{t} + g_{t+1} \\
          p_{t+1} = p_{t} - lr * v_{t+1}
    """
    grads = torch.autograd.grad(loss, params, create_graph=create_graph)
    velocities = [momentum * v + g for v, g in zip(aux_params, grads)]
    stepped = [w - step_size * vel for w, vel in zip(params, velocities)]
    return stepped, velocities
def adam_step(
    params,
    ms,
    us,
    loss,
    step_size,
    step_cnt,
    beta1,
    beta2,
    eps,
    momentum=0.9,
    create_graph=True,  # False when used with approximate implicit gradient; should be True otherwise
):
    """One differentiable Adam update.

    Returns ``(new_params, new_first_moments, new_second_moments)``. The
    ``momentum`` argument is accepted for signature compatibility but unused,
    matching the original implementation.
    """
    grads = torch.autograd.grad(loss, params, create_graph=create_graph)
    new_m = [beta1 * m + (1.0 - beta1) * g for m, g in zip(ms, grads)]
    # The tiny constant keeps sqrt differentiable at exactly zero.
    new_u = [beta2 * u + (1.0 - beta2) * g**2 + 1e-12 for u, g in zip(us, grads)]
    bias1 = 1.0 - beta1**step_cnt
    bias2 = 1.0 - beta2**step_cnt
    new_params = []
    for w, m, u in zip(params, new_m, new_u):
        m_hat = m / bias1
        denom = torch.sqrt(u / bias2) + eps
        new_params.append(w - step_size * (m_hat / denom))
    return new_params, new_m, new_u
|
Blackbox-Coresets-VI-main
|
psvi/hypergrad/diff_optimizers.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
from .hypergradients import *
from .diff_optimizers import *
|
Blackbox-Coresets-VI-main
|
psvi/hypergrad/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
from typing import Callable, List
import torch
from torch import Tensor
from torch.autograd import grad as torch_grad
from . import CG_torch
# noinspection PyUnusedLocal
def reverse_unroll(
    params: List[Tensor],
    hparams: List[Tensor],
    outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
    set_grad=True,
) -> List[Tensor]:
    """
    Computes the hypergradient by backpropagating through a previously employed inner solver procedure.
    Args:
        params: the output of a torch differentiable inner solver (it must depend on hparams in the torch graph)
        hparams: the outer variables (or hyperparameters), each element needs requires_grad=True
        outer_loss: computes the outer objective taking parameters and hyperparameters as inputs
        set_grad: if True set t.grad to the hypergradient for every t in hparams
    Returns:
        the list of hypergradients for each element in hparams
    """
    loss_value = outer_loss(params, hparams)
    hypergrads = torch.autograd.grad(loss_value, hparams, retain_graph=True)
    if set_grad:
        update_tensor_grads(hparams, hypergrads)
    return hypergrads
# noinspection PyUnusedLocal
def reverse(
    params_history: List[List[Tensor]],
    hparams: List[Tensor],
    update_map_history: List[Callable[[List[Tensor], List[Tensor]], List[Tensor]]],
    outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
    set_grad=True,
) -> List[Tensor]:
    """
    Computes the hypergradient by recomputing and backpropagating through each inner update
    using the inner iterates and the update maps previously employed by the inner solver.
    Similarly to checkpointing, this allows to save memory w.r.t. reverse_unroll by increasing computation time.
    Truncated reverse can be performed by passing only part of the trajectory information, i.e. only the
    last k inner iterates and updates.
    Args:
        params_history: the inner iterates (from first to last)
        hparams: the outer variables (or hyperparameters), each element needs requires_grad=True
        update_map_history: updates used to solve the inner problem (from first to last)
        outer_loss: computes the outer objective taking parameters and hyperparameters as inputs
        set_grad: if True set t.grad to the hypergradient for every t in hparams
    Returns:
        the list of hypergradients for each element in hparams
    """
    # Detach every stored iterate so each update step is re-derived locally.
    params_history = [
        [w.detach().requires_grad_(True) for w in params] for params in params_history
    ]
    o_loss = outer_loss(params_history[-1], hparams)
    grad_outer_w, grad_outer_hparams = get_outer_gradients(
        o_loss, params_history[-1], hparams
    )
    # alphas carries the adjoint (vector in the vector-Jacobian products)
    # backwards through the trajectory.
    alphas = grad_outer_w
    grads = [torch.zeros_like(w) for w in hparams]
    K = len(params_history) - 1
    # Walk the trajectory backwards: k = -2, -3, ..., -(K+1).
    for k in range(-2, -(K + 2), -1):
        # Recompute iterate k+1 from iterate k to rebuild the local graph.
        w_mapped = update_map_history[k + 1](params_history[k], hparams)
        bs = grad_unused_zero(w_mapped, hparams, grad_outputs=alphas, retain_graph=True)
        grads = [g + b for g, b in zip(grads, bs)]
        alphas = torch_grad(w_mapped, params_history[k], grad_outputs=alphas)
    # Add the direct dependence of the outer loss on the hyperparameters.
    grads = [g + v for g, v in zip(grads, grad_outer_hparams)]
    if set_grad:
        update_tensor_grads(hparams, grads)
    return grads
def fixed_point(
    params: List[Tensor],
    hparams: List[Tensor],
    K: int,
    fp_map: Callable[[List[Tensor], List[Tensor]], List[Tensor]],
    outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
    tol=1e-10,
    set_grad=True,
    stochastic=False,
) -> List[Tensor]:
    """
    Computes the hypergradient by applying K steps of the fixed point method (it can end earlier when tol is reached).
    Args:
        params: the output of the inner solver procedure.
        hparams: the outer variables (or hyperparameters), each element needs requires_grad=True
        K: the maximum number of fixed point iterations
        fp_map: the fixed point map which defines the inner problem
        outer_loss: computes the outer objective taking parameters and hyperparameters as inputs
        tol: end the method earlier when the normed difference between two iterates is less than tol
        set_grad: if True set t.grad to the hypergradient for every t in hparams
        stochastic: set this to True when fp_map is not a deterministic function of its inputs
    Returns:
        the list of hypergradients for each element in hparams
    """
    # Detach the inner solution so the iteration works on a fresh local graph.
    params = [w.detach().requires_grad_(True) for w in params]
    o_loss = outer_loss(params, hparams)
    grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params, hparams)
    if not stochastic:
        # Deterministic map: one evaluation reused across iterations.
        w_mapped = fp_map(params, hparams)
    # vs iterates towards (I - J^T)^{-1} grad_outer_w via v <- J^T v + grad_outer_w.
    vs = [torch.zeros_like(w) for w in params]
    vs_vec = cat_list_to_tensor(vs)
    for _ in range(K):
        vs_prev_vec = vs_vec
        if stochastic:
            w_mapped = fp_map(params, hparams)
            vs = torch_grad(w_mapped, params, grad_outputs=vs, retain_graph=False)
        else:
            vs = torch_grad(w_mapped, params, grad_outputs=vs, retain_graph=True)
        vs = [v + gow for v, gow in zip(vs, grad_outer_w)]
        vs_vec = cat_list_to_tensor(vs)
        if float(torch.norm(vs_vec - vs_prev_vec)) < tol:
            break
    if stochastic:
        w_mapped = fp_map(params, hparams)
    grads = torch_grad(w_mapped, hparams, grad_outputs=vs, allow_unused=True)
    # Hyperparameters unused by fp_map get only the direct outer-loss gradient.
    grads = [g + v if g is not None else v for g, v in zip(grads, grad_outer_hparams)]
    if set_grad:
        update_tensor_grads(hparams, grads)
    return grads
def CG(
    params: List[Tensor],
    hparams: List[Tensor],
    K: int,
    fp_map: Callable[[List[Tensor], List[Tensor]], List[Tensor]],
    outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
    tol=1e-10,
    set_grad=True,
    stochastic=False,
) -> List[Tensor]:
    """
    Computes the hypergradient by applying K steps of the conjugate gradient method (CG).
    It can end earlier when tol is reached.
    Args:
        params: the output of the inner solver procedure.
        hparams: the outer variables (or hyperparameters), each element needs requires_grad=True
        K: the maximum number of conjugate gradient iterations
        fp_map: the fixed point map which defines the inner problem
        outer_loss: computes the outer objective taking parameters and hyperparameters as inputs
        tol: end the method earlier when the norm of the residual is less than tol
        set_grad: if True set t.grad to the hypergradient for every t in hparams
        stochastic: set this to True when fp_map is not a deterministic function of its inputs
    Returns:
        the list of hypergradients for each element in hparams
    """
    # Detach the inner solution so CG works on a fresh graph rooted at params.
    params = [w.detach().requires_grad_(True) for w in params]
    o_loss = outer_loss(params, hparams)
    grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params, hparams)
    if not stochastic:
        w_mapped = fp_map(params, hparams)
    def dfp_map_dw(xs):
        # Applies (I - d fp_map / d w)^T to xs via vector-Jacobian products.
        if stochastic:
            w_mapped_in = fp_map(params, hparams)
            Jfp_mapTv = torch_grad(
                w_mapped_in, params, grad_outputs=xs, retain_graph=False
            )
        else:
            Jfp_mapTv = torch_grad(w_mapped, params, grad_outputs=xs, retain_graph=True)
        return [v - j for v, j in zip(xs, Jfp_mapTv)]
    vs = CG_torch.cg(
        dfp_map_dw, grad_outer_w, max_iter=K, epsilon=tol
    ) # K steps of conjugate gradient
    if stochastic:
        w_mapped = fp_map(params, hparams)
    grads = torch_grad(w_mapped, hparams, grad_outputs=vs)
    grads = [g + v for g, v in zip(grads, grad_outer_hparams)]
    if set_grad:
        update_tensor_grads(hparams, grads)
    return grads
def CG_normaleq(
    params: List[Tensor],
    hparams: List[Tensor],
    K: int,
    fp_map: Callable[[List[Tensor], List[Tensor]], List[Tensor]],
    outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
    tol=1e-10,
    set_grad=True,
) -> List[Tensor]:
    """Similar to CG but the conjugate gradient is applied on the normal equation (has a higher time complexity)"""
    # Detach so differentiation flows only through fp_map / outer_loss.
    params = [w.detach().requires_grad_(True) for w in params]
    o_loss = outer_loss(params, hparams)
    grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params, hparams)
    w_mapped = fp_map(params, hparams)
    def dfp_map_dw(xs):
        # Applies A A^T xs with A = (I - J_fp): first the VJP, then a JVP on the
        # residual — this is the normal-equation operator CG solves against.
        Jfp_mapTv = torch_grad(w_mapped, params, grad_outputs=xs, retain_graph=True)
        v_minus_Jfp_mapTv = [v - j for v, j in zip(xs, Jfp_mapTv)]
        # normal equation part
        Jfp_mapv_minus_Jfp_mapJfp_mapTv = jvp(
            lambda _params: fp_map(_params, hparams), params, v_minus_Jfp_mapTv
        )
        return [
            v - vv for v, vv in zip(v_minus_Jfp_mapTv, Jfp_mapv_minus_Jfp_mapJfp_mapTv)
        ]
    # Right-hand side A b of the normal equation, b = grad_outer_w.
    v_minus_Jfp_mapv = [
        g - jfp_mapv
        for g, jfp_mapv in zip(
            grad_outer_w,
            jvp(lambda _params: fp_map(_params, hparams), params, grad_outer_w),
        )
    ]
    vs = CG_torch.cg(
        dfp_map_dw, v_minus_Jfp_mapv, max_iter=K, epsilon=tol
    )  # K steps of conjugate gradient
    # allow_unused: some hparams may not appear in fp_map; None grads fall back to
    # the direct outer gradient below.
    grads = torch_grad(w_mapped, hparams, grad_outputs=vs, allow_unused=True)
    grads = [g + v if g is not None else v for g, v in zip(grads, grad_outer_hparams)]
    if set_grad:
        update_tensor_grads(hparams, grads)
    return grads
def neumann(
    params: List[Tensor],
    hparams: List[Tensor],
    K: int,
    fp_map: Callable[[List[Tensor], List[Tensor]], List[Tensor]],
    outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
    tol=1e-10,
    set_grad=True,
) -> List[Tensor]:
    """Saves one iteration from the fixed point method"""
    # from https://arxiv.org/pdf/1803.06396.pdf, should return the same gradient of fixed point K+1
    # Approximates (I - J_fp^T)^{-1} grad_outer_w with the truncated Neumann
    # series sum_k (J_fp^T)^k grad_outer_w.
    params = [w.detach().requires_grad_(True) for w in params]
    o_loss = outer_loss(params, hparams)
    grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params, hparams)
    w_mapped = fp_map(params, hparams)
    # vs holds the current series term (J_fp^T)^k v; gs accumulates the partial sum.
    vs, gs = grad_outer_w, grad_outer_w
    gs_vec = cat_list_to_tensor(gs)
    for k in range(K):
        gs_prev_vec = gs_vec
        # Next series term; retain_graph since the same graph is reused each step.
        vs = torch_grad(w_mapped, params, grad_outputs=vs, retain_graph=True)
        gs = [g + v for g, v in zip(gs, vs)]
        gs_vec = cat_list_to_tensor(gs)
        # Stop once the partial sum has converged.
        if float(torch.norm(gs_vec - gs_prev_vec)) < tol:
            break
    grads = torch_grad(w_mapped, hparams, grad_outputs=gs)
    grads = [g + v for g, v in zip(grads, grad_outer_hparams)]
    if set_grad:
        update_tensor_grads(hparams, grads)
    return grads
def exact(
    opt_params_f: Callable[[List[Tensor]], List[Tensor]],
    hparams: List[Tensor],
    outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
    set_grad=True,
) -> List[Tensor]:
    """
    Exact hypergradient via straight backpropagation through a closed-form,
    torch-differentiable inner solver.

    opt_params_f maps hyperparameters to the optimal inner parameters; the
    outer loss is evaluated on that solution and differentiated wrt hparams.
    """
    inner_solution = opt_params_f(hparams)
    loss_value = outer_loss(inner_solution, hparams)
    grads = torch_grad(loss_value, hparams)
    if set_grad:
        update_tensor_grads(hparams, grads)
    return grads
# UTILS
def grd(a, b):
    """Gradient of `a` wrt `b`, keeping the graph alive for higher-order grads."""
    return torch.autograd.grad(
        outputs=a, inputs=b, create_graph=True, retain_graph=True
    )
def list_dot(l1, l2):
    """Dot product extended to paired lists of tensors: sum of elementwise products."""
    partial_sums = [(a * b).sum() for a, b in zip(l1, l2)]
    return torch.stack(partial_sums).sum()
def jvp(fp_map, params, vs):
    """
    Jacobian-vector product J_fp(params) @ vs via the double-backward trick:
    take a VJP against dummy seeds, then differentiate that wrt the seeds.
    """
    seeds = [torch.ones_like(out).requires_grad_(True) for out in fp_map(params)]
    vjp_wrt_params = grd(list_dot(fp_map(params), seeds), params)
    return grd(list_dot(vs, vjp_wrt_params), seeds)
def get_outer_gradients(outer_loss, params, hparams, retain_graph=True):
    """
    Gradients of the outer objective wrt both params and hparams,
    substituting zeros for inputs the loss does not depend on.
    """
    d_params = grad_unused_zero(outer_loss, params, retain_graph=retain_graph)
    d_hparams = grad_unused_zero(outer_loss, hparams, retain_graph=retain_graph)
    return d_params, d_hparams
def cat_list_to_tensor(list_tx):
    """Flatten each tensor in the list and concatenate into one 1-D tensor."""
    flat_chunks = []
    for chunk in list_tx:
        flat_chunks.append(chunk.view([-1]))
    return torch.cat(flat_chunks)
def update_tensor_grads(hparams, grads):
    """
    Accumulate `grads` into the `.grad` field of each hyperparameter,
    initializing missing `.grad` buffers to zero. `None` entries are skipped.
    """
    for param, update in zip(hparams, grads):
        if param.grad is None:
            param.grad = torch.zeros_like(param)
        if update is not None:
            param.grad += update
def grad_unused_zero(
    output, inputs, grad_outputs=None, retain_graph=False, create_graph=False
):
    """
    Like torch.autograd.grad with allow_unused=True, but inputs that do not
    influence `output` get a zeros tensor instead of None.
    """
    raw = torch.autograd.grad(
        output,
        inputs,
        grad_outputs=grad_outputs,
        allow_unused=True,
        retain_graph=retain_graph,
        create_graph=create_graph,
    )
    return tuple(
        torch.zeros_like(var) if g is None else g for g, var in zip(raw, inputs)
    )
|
Blackbox-Coresets-VI-main
|
psvi/hypergrad/hypergradients.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
import time
import numpy as np
import torch
import torch.distributions as dist
from torch.distributions.normal import Normal
from typing import Any, Dict
from psvi.models.logreg import model, laplace_precision, mcmc_sample, logreg_forward
from psvi.models.neural_net import categorical_fn, gaussian_fn, VILinear
from tqdm import tqdm
from psvi.experiments.experiments_utils import set_up_model, update_hyperparams_dict
from psvi.inference.utils import *
from torch.utils.data import DataLoader
from psvi.inference.psvi_classes import SubsetPreservingTransforms
from functools import partial
r"""
Implementations of baseline inference methods.
"""
def run_laplace(
    theta,
    mu0,
    sigma0,
    x_core,
    y_core,
    w_core,
    optim_net,
    inner_it=1000,
    diagonal=True,
    mc_samples=4,
    seed=0,
    **kwargs,
):
    r"""
    Returns samples from Laplace approximation

    Optimizes `theta` to the MAP of the weighted coreset log-joint for
    `inner_it` steps, then samples `mc_samples` draws from a Gaussian centred
    at theta with the Laplace precision. Re-seeds all RNGs with `seed` on
    every call, so repeated calls with the same seed are deterministic.
    """
    random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
    for _ in range(
        inner_it
    ):  # inner loop for Laplace approximation of current coreset iterate
        optim_net.zero_grad()
        ll_core, prior = model(theta, mu0, sigma0, x_core, y_core, single=True)
        loss = -w_core.dot(ll_core) - prior  # negative log-joint
        loss.backward()
        optim_net.step()
    # Clear leftover gradients before sampling (theta may be reused by the caller).
    optim_net.zero_grad()
    with torch.no_grad():
        # samples from coreset iterate
        prec = laplace_precision(x_core, theta, w_core, diagonal=diagonal)
        laplace_approx = (
            dist.MultivariateNormal(theta, precision_matrix=prec)
            if not diagonal
            else Normal(theta, prec**-0.5)  # diagonal case: std = prec^{-1/2}
        )
    return laplace_approx.rsample((mc_samples,)).squeeze()
def run_random(
    x=None,
    y=None,
    xt=None,
    yt=None,
    mc_samples=4,
    num_epochs=100,
    log_every=10,
    N=None,
    D=None,
    seed=0,
    mcmc=False,
    lr0net=1e-3,  # initial learning rate for optimizer
    **kwargs,
) -> Dict[str, Any]:
    r"""
    Returns diagnostics from a Laplace or an MCMC fit on a random subset of the training data

    Grows a uniformly-random coreset one point per epoch, reweighting all
    selected points to N / |coreset|, and periodically evaluates test
    accuracy/NLL of the induced posterior.
    """
    random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
    w = torch.zeros(N).clone().detach()  # coreset weights
    # times starts at [0] so cumulative elapsed times can be appended; it is
    # sliced with [1:] on return.
    nlls_random, accs_random, idcs_random, times_random, core_idcs = [], [], [], [0], []
    # Append a bias column of ones for the logistic-regression intercept.
    x_test_aug = torch.cat((xt, torch.ones(xt.shape[0], 1)), dim=1)
    x_aug = torch.cat((x, torch.ones(x.shape[0], 1)), dim=1)
    t_start = time.time()
    num_epochs = min(num_epochs, 2000) if mcmc else num_epochs
    for it in tqdm(range(num_epochs)):
        # Evaluate predictive performance of current coreset posterior
        if it % log_every == 0:
            if mcmc:
                # NOTE(review): `sml` is not defined in this module — presumably
                # provided by the star import from psvi.inference.utils; confirm.
                param_samples = mcmc_sample(sml, core_idcs, x, y, w, seed=seed)
            else:
                # model params prior
                mu0, sigma0 = (
                    torch.zeros(D + 1),
                    torch.ones(D + 1),
                )
                theta0 = Normal(mu0, sigma0).rsample()
                theta = torch.nn.Parameter(theta0, requires_grad=True)
                optim_net = torch.optim.Adam([theta], lr0net)
                param_samples = run_laplace(
                    theta,
                    mu0,
                    sigma0,
                    x_aug[core_idcs, :],
                    y[core_idcs],
                    w[core_idcs],
                    optim_net,
                    inner_it=1000,
                    diagonal=True,
                    mc_samples=mc_samples,
                    seed=seed,
                )
            times_random.append(times_random[-1] + time.time() - t_start)
            test_probs = logreg_forward(param_samples, x_test_aug)
            test_acc = test_probs.gt(0.5).float().eq(yt).float().mean()
            test_nll = -dist.Bernoulli(probs=test_probs).log_prob(yt).mean()
            nlls_random.append(test_nll.item()), accs_random.append(
                test_acc.item()
            ), idcs_random.append(len(core_idcs))
            print(f"predictive accuracy: {(100*test_acc.item()):.2f}%")
        # Pick the next coreset point uniformly among points not yet selected.
        new_coreset_point = random.choice(
            tuple(set(range(N)).difference(set(core_idcs)))
        )
        core_idcs.append(new_coreset_point)  # attach a new random point
        # Uniform reweighting so coreset weights sum to N.
        w[core_idcs] = N / len(core_idcs)
    # store results
    return {
        "accs": accs_random,
        "nlls": nlls_random,
        "csizes": idcs_random,
        "times": times_random[1:],
    }
def run_giga(
    x=None,
    y=None,
    xt=None,
    yt=None,
    mc_samples=100,
    data_minibatch=512,
    num_epochs=100,
    log_every=10,
    N=None,
    D=None,
    seed=0,
    mcmc=False,
    subset_size=200,
    lr0net=1e-3,
    **kwargs,
) -> Dict[str, Any]:
    r"""
    Returns diagnostics of a fit using the GIGA coreset (Campbell & Broderick, 2018)

    Greedily grows a coreset by geodesic ascent on normalized log-likelihood
    vectors estimated over `mc_samples` posterior samples, logging test
    accuracy/NLL every `log_every` iterations.
    """
    random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
    mc_samples = max(
        mc_samples,
        50,  # overwrite arg for num of mc_samples for more fine grained vectors
    )
    w = (
        torch.zeros(N)
        .clone()
        .detach()
        .requires_grad_(
            requires_grad=False,
        )
    )  # coreset weights
    w_pred = (
        torch.zeros(N)
        .clone()
        .detach()
        .requires_grad_(
            requires_grad=False,
        )
    )  # rescaled weights for predictions
    # model params prior
    mu0, sigma0 = (
        torch.zeros(D + 1),
        torch.ones(D + 1),
    )
    nlls_giga, accs_giga, idcs_giga, times_giga = [], [], [], [0]
    # Bias column of ones appended for the logistic-regression intercept.
    x_aug, x_test_aug = torch.cat((x, torch.ones(x.shape[0], 1)), dim=1), torch.cat(
        (xt, torch.ones(xt.shape[0], 1)), dim=1
    )
    core_idcs = []
    t_start = time.time()
    # Approximate the true posterior via MCMC sampling on a random subset
    # [this computation occurs once]
    sub_idcs, sum_scaling = (
        np.random.randint(x.shape[0], size=subset_size),
        x.shape[0] / data_minibatch,
    )  # sample minibatch when accessing full data and rescale corresponding log-likelihood
    if mcmc:
        with torch.no_grad():
            # NOTE(review): `sml` is not defined in this module — presumably
            # exported by psvi.inference.utils via the star import; confirm.
            param_samples = mcmc_sample(
                sml,
                core_idcs,
                x[sub_idcs, :],
                y[sub_idcs],
                sum_scaling * torch.ones_like(y[sub_idcs]),
                n_samples=mc_samples,
            )
    else:
        theta0 = Normal(mu0, sigma0).rsample()
        theta = torch.nn.Parameter(theta0, requires_grad=True)
        param_samples = run_laplace(
            theta,
            mu0,
            sigma0,
            x_aug[sub_idcs, :],
            y[sub_idcs],
            sum_scaling * torch.ones_like(y[sub_idcs]),
            torch.optim.Adam([theta], lr0net),
            inner_it=1000,
            diagonal=True,
            mc_samples=mc_samples,
            seed=seed,
        )
    lw = torch.zeros(mc_samples)  # initial vector of weighted log-likelihood of coreset
    # Grow the coreset for a number of iterations
    for it in tqdm(range(num_epochs)):
        x_core, y_core = x_aug[core_idcs, :], y[core_idcs]
        sub_idcs, _ = (
            np.random.randint(x.shape[0], size=data_minibatch),
            x.shape[0] / data_minibatch,
        )  # sample minibatch when accessing full data and rescale corresponding log-likelihood
        ll_all, _ = model(
            param_samples,
            mu0,
            sigma0,
            torch.cat((x_aug[sub_idcs, :], x_core)),
            torch.cat((y[sub_idcs], y_core)),
        )
        # Split per-point log-likelihoods into minibatch rows and coreset rows.
        ll_data, ll_core = ll_all[: len(sub_idcs), :], ll_all[len(sub_idcs) :, :]
        # Center each row across the mc_samples dimension.
        ll_data, ll_core = (
            ll_data - ll_data.mean(axis=1).repeat(ll_data.shape[1], 1).T,
            ll_core - ll_core.mean(axis=1).repeat(ll_core.shape[1], 1).T,
        )
        sum_lls = ll_data.sum(axis=0)
        norm_lls = torch.nn.functional.normalize(ll_data, dim=1)  # ell_n
        norm_sumlls = torch.nn.functional.normalize(sum_lls, dim=0)  # ell
        denom_sumlls = sum_lls.norm(p=2, dim=0)  # ||L||
        if it % log_every == 0:  # log predictive performance
            # Rescaling weights for unnormalized likelihoods in predictions
            if len(core_idcs) > 0:
                w_pred[core_idcs] = (
                    w[core_idcs]
                    * denom_sumlls
                    / ll_core.norm(p=2, dim=1)
                    * lw.dot(norm_sumlls)
                )
            if mcmc:
                predictive_samples = mcmc_sample(sml, core_idcs, x, y, w_pred)
            else:
                theta0 = Normal(mu0, sigma0).rsample()
                theta = torch.nn.Parameter(theta0, requires_grad=True)
                optim_net = torch.optim.Adam([theta], lr0net)
                predictive_samples = run_laplace(
                    theta,
                    mu0,
                    sigma0,
                    x_aug[core_idcs, :],
                    y[core_idcs],
                    w[core_idcs].detach(),
                    optim_net,
                    inner_it=100,
                    diagonal=True,
                    mc_samples=mc_samples,
                    seed=seed,
                )
            times_giga.append(times_giga[-1] + time.time() - t_start)
            test_probs = logreg_forward(predictive_samples, x_test_aug)
            test_acc = test_probs.gt(0.5).float().eq(yt).float().mean()
            test_nll = -dist.Bernoulli(probs=test_probs).log_prob(yt).mean()
            print(f"predictive accuracy: {(100*test_acc.item()):.2f}%")
            nlls_giga.append(test_nll.item())
            accs_giga.append(test_acc.item())
            idcs_giga.append(len(w[w > 0]))
        # Compute geodesic direction of each datapoint, make greedy next point selection and compute the step size
        d = torch.nn.functional.normalize(
            norm_sumlls - norm_sumlls.dot(lw) * lw, dim=0
        )
        lwr = lw.repeat(len(sub_idcs), 1)
        dns = torch.nn.functional.normalize(
            norm_lls
            - torch.einsum(
                "n, ns -> ns", torch.einsum("ns, ns -> n", lwr, norm_lls), lwr
            ),
            dim=1,
        )
        # new datapoint selection
        pt_idx = sub_idcs[torch.argmax(torch.einsum("s, ns -> n", d, dns))]
        if pt_idx not in core_idcs:
            core_idcs.append(pt_idx)  # list of coreset point indices
            idx_new = -1  # the newly appended point is the last coreset row
            x_core, y_core = (
                x_aug[core_idcs, :],
                y[core_idcs],
            )  # updated coreset support
            ll_all, _ = model(
                param_samples,
                mu0,
                sigma0,
                torch.cat((x_aug[sub_idcs, :], x_core)),
                torch.cat((y[sub_idcs], y_core)),
            )
            ll_core = ll_all[len(sub_idcs) :, :]
            ll_core = ll_core - ll_core.mean(axis=1).repeat(ll_core.shape[1], 1).T
            norm_ll_core = torch.nn.functional.normalize(
                ll_core, dim=1
            )  # ell_n_core
        else:
            # NOTE(review): this branch reuses `norm_ll_core` from a previous
            # iteration; it is stale relative to this iteration's minibatch and
            # undefined if the very first selection revisited a point (cannot
            # happen since the coreset starts empty) — confirm intended.
            idx_new = core_idcs.index(pt_idx)
        # Line-search coefficients for the geodesic step (gamma in [0, 1]).
        zeta0, zeta1, zeta2 = (
            norm_sumlls.dot(norm_ll_core[idx_new, :]),
            norm_sumlls.dot(lw),
            norm_ll_core[idx_new, :].dot(lw),
        )
        gamma = (zeta0 - zeta1 * zeta2) / (
            zeta0 - zeta1 * zeta2 + zeta1 - zeta0 * zeta2
        )
        lw = torch.nn.functional.normalize(
            (1 - gamma) * lw + gamma * norm_ll_core[idx_new, :], dim=0
        )
        # Optimal weight calibration
        w = (
            (1 - gamma) * w
            + gamma
            * torch.nn.functional.one_hot(torch.tensor(pt_idx), num_classes=N)
        ) / torch.norm((1 - gamma) * lw + gamma * norm_ll_core[idx_new, :])
        with torch.no_grad():
            torch.clamp_(w, min=0)  # project weights onto the nonnegative orthant
    # store results
    return {
        "accs": accs_giga,
        "nlls": nlls_giga,
        "csizes": idcs_giga,
        "times": times_giga[1:],
    }
def run_sparsevi(
    x=None,
    y=None,
    xt=None,
    yt=None,
    mc_samples=4,
    data_minibatch=128,
    num_epochs=100,
    log_every=10,
    N=None,
    D=None,
    diagonal=True,
    inner_it=10,
    outer_it=10,
    lr0net=1e-3,
    lr0v=1e-1,
    seed=0,
    mcmc=False,
    **kwargs,
) -> Dict[str, Any]:  # max coreset size
    r"""
    Returns diagnostics of a fit using Sparse VI (Campbell & Beronov, 2019)

    Each epoch: (1) Laplace-approximate the current coreset posterior,
    (2) estimate per-point log-likelihood vectors, (3) greedily attach the
    point most correlated with the residual, (4) refine the weights with
    projected gradient steps.
    """
    def resc(N, w, core_idcs):
        # Weight-rescaling hook; currently disabled (identity). The commented
        # alternative rescales weights to sum to N.
        return 1. #N/sum(w[core_idcs]) if sum(w[core_idcs])>0 else 1
    outer_it = min(outer_it, 500)  # cap to maximum value for num_epochs and outer_it
    num_epochs = min(num_epochs, 2000) if mcmc else num_epochs
    random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
    w = (
        torch.zeros(N)
        .clone()
        .detach()
        .requires_grad_(
            requires_grad=True,
        )
    )  # coreset weights
    # model params prior
    mu0, sigma0 = (
        torch.zeros(D + 1),
        torch.ones(D + 1),
    )
    nlls_svi, accs_svi, idcs_svi, times_svi = [], [], [], [0]
    # Bias column of ones appended for the logistic-regression intercept.
    x_aug, x_test_aug = torch.cat((x, torch.ones(x.shape[0], 1)), dim=1), torch.cat(
        (xt, torch.ones(xt.shape[0], 1)), dim=1
    )
    # Grow the coreset for a number of iterations
    core_idcs = []
    t_start = time.time()
    for it in tqdm(range(num_epochs)):
        # Evaluate predictive performance of current coreset posterior
        if it % log_every == 0:
            if mcmc:
                # NOTE(review): `sml` is not defined in this module — presumably
                # exported by psvi.inference.utils via the star import; confirm.
                param_samples = mcmc_sample(sml, core_idcs, x, y, w)
            else:
                theta0 = Normal(mu0, sigma0).rsample()
                theta = torch.nn.Parameter(theta0, requires_grad=True)
                optim_net = torch.optim.Adam([theta], lr0net)
                param_samples = run_laplace(
                    theta,
                    mu0,
                    sigma0,
                    x_aug[core_idcs, :],
                    y[core_idcs],
                    resc(N, w.detach(), core_idcs)*w[core_idcs].detach(),
                    torch.optim.Adam([theta], lr0net),
                    inner_it=1000,
                    diagonal=True,
                    mc_samples=mc_samples,
                )
            times_svi.append(times_svi[-1] + time.time() - t_start)
            test_probs = logreg_forward(param_samples, x_test_aug)
            test_acc = test_probs.gt(0.5).float().eq(yt).float().mean()
            test_nll = -dist.Bernoulli(probs=test_probs).log_prob(yt).mean()
            nlls_svi.append(test_nll.item())
            accs_svi.append(test_acc.item())
            idcs_svi.append(len(core_idcs))
            print(f"predictive accuracy: {(100*test_acc.item()):.2f}%")
        # 1. Compute current coreset posterior using Laplace approximation on coreset points
        sub_idcs, sum_scaling = (
            np.random.randint(x.shape[0], size=data_minibatch),
            x.shape[0] / data_minibatch,
        )  # sample minibatch when accessing full data and rescale corresponding log-likelihood
        x_core, y_core = x_aug[core_idcs, :], y[core_idcs]
        theta0 = Normal(mu0, sigma0).rsample()
        theta = torch.nn.Parameter(theta0, requires_grad=True)
        optim_net = torch.optim.Adam([theta], lr0net)
        for _ in range(
            inner_it
        ):  # inner loop for Laplace approximation of current coreset iterate
            optim_net.zero_grad()
            ll_core, prior = model(theta, mu0, sigma0, x_core, y_core, single=True)
            loss = -resc(N, w, core_idcs)*w[core_idcs].dot(ll_core) - prior  # negative log-joint
            loss.backward()
            optim_net.step()
        with torch.no_grad():
            # samples from coreset iterate
            prec = laplace_precision(x_core, theta, resc(N, w, core_idcs)*w[core_idcs], diagonal=diagonal)
            laplace_approx = (
                dist.MultivariateNormal(theta, precision_matrix=prec)
                if not diagonal
                else Normal(theta, prec**-0.5)
            )
            param_samples = laplace_approx.rsample((mc_samples,)).squeeze()
        # 2. Compute loglikelihoods for each sample
        ll_all, _ = model(
            param_samples,
            mu0,
            sigma0,
            torch.cat((x_aug[sub_idcs, :], x_core)),
            torch.cat((y[sub_idcs], y_core)),
        )
        ll_data, ll_core = ll_all[: len(sub_idcs), :], ll_all[len(sub_idcs) :, :]
        # Center per-point log-likelihoods across samples.
        cll_data, cll_core = (
            ll_data - ll_data.mean(axis=1).repeat(ll_data.shape[1], 1).T,
            ll_core - ll_core.mean(axis=1).repeat(ll_core.shape[1], 1).T,
        )
        # 3. Select point to attach to the coreset next
        resid = sum_scaling * cll_data.sum(axis=0) - resc(N, w, core_idcs)*w[core_idcs].matmul(cll_core)
        corrs = (
            cll_data.matmul(resid)
            / torch.sqrt((cll_data**2).sum(axis=1))
            / cll_data.shape[1]
        )
        corecorrs = (
            torch.abs(cll_core.matmul(resid))
            / torch.sqrt((cll_core**2).sum(axis=1))
            / cll_core.shape[1]
        )
        # Attach a new point only if it beats every current coreset point.
        if corecorrs.shape[0] == 0 or corrs.max() > corecorrs.max():
            pt_idx = sub_idcs[torch.argmax(corrs)]
            print(f"\nAdding new point. Support increased to {len(core_idcs)+1} \n") if pt_idx not in core_idcs else print("\nImproving fit with current support \n")
            core_idcs.append(pt_idx) if pt_idx not in core_idcs else None
        else:
            print("\nImproving fit with current support \n")
        print(f"weights vector {(resc(N, w, core_idcs)*w[w>0]).sum()}")
        # 4. Sample for updated weights and take projected gradient descent steps on the weights
        # sample from updated model
        x_core, y_core = x_aug[core_idcs, :], y[core_idcs]
        optim_w = torch.optim.Adam([w], lr0v) #/(1. + it))
        theta0 = Normal(mu0, sigma0).rsample()
        theta = torch.nn.Parameter(theta0, requires_grad=True)
        for _ in range(outer_it):
            optim_net = torch.optim.Adam([theta], lr0net)
            for _ in range(
                inner_it
            ):  # inner loop for Laplace approximation of current coreset iterate
                # negative log-joint
                optim_net.zero_grad()
                ll, prior = model(theta, mu0, sigma0, x_core, y_core, single=True)
                loss = -resc(N, w, core_idcs)*w[core_idcs].dot(ll) - prior
                loss.backward()
                optim_net.step()
            with torch.no_grad():
                # samples from coreset iterate
                prec = laplace_precision(x_core, theta, resc(N, w, core_idcs)*w[core_idcs], diagonal=diagonal)
                laplace_approx = (
                    dist.MultivariateNormal(theta, precision_matrix=prec)
                    if not diagonal
                    else Normal(theta, prec**-0.5)
                )
                param_samples = laplace_approx.rsample((mc_samples,)).squeeze()
            sub_idcs, sum_scaling = (
                np.random.randint(x_aug.shape[0], size=data_minibatch),
                x.shape[0] / data_minibatch,
            )  # sample minibatch when accessing full data and rescale corresponding log-likelihood
            # compute w_grad
            ll_all, _ = model(
                param_samples,
                mu0,
                sigma0,
                torch.cat((x_aug[sub_idcs, :], x_core)),
                torch.cat((y[sub_idcs], y_core)),
            )
            ll_data, ll_core = (
                ll_all[: len(sub_idcs), :],
                ll_all[len(sub_idcs) :, :],
            )
            cll_data, cll_core = (
                ll_data - ll_data.mean(axis=1).repeat(ll_data.shape[1], 1).T,
                ll_core - ll_core.mean(axis=1).repeat(ll_core.shape[1], 1).T,
            )
            resid = sum_scaling * cll_data.sum(axis=0) - resc(N, w, core_idcs) * w[core_idcs].matmul(
                cll_core
            )
            # Hand-computed Monte Carlo KL gradient written directly into w.grad
            # (w.grad exists: loss.backward() above populated it).
            w.grad.data[core_idcs] = (-cll_core.matmul(resid) / cll_core.shape[1]) / resc(N, w, core_idcs)
            optim_w.step()
            with torch.no_grad():
                torch.clamp_(w, 0)  # project weights onto the nonnegative orthant
    # store results
    return {
        "nlls": nlls_svi,
        "accs": accs_svi,
        "csizes": idcs_svi,
        "times": times_svi[1:],
    }
def run_opsvi(
    x=None,
    y=None,
    xt=None,
    yt=None,
    mc_samples=10,
    data_minibatch=128,
    num_epochs=100,
    log_every=10,
    N=None,
    D=None,
    num_pseudo=10,
    inner_it=10,
    diagonal=True,
    lr0net=1e-3,
    lr0u=1e-3,
    lr0v=1e-3,
    register_elbos=False,
    init_args="subsample",
    seed=0,
    mcmc=False,
    log_pseudodata=False,
    **kwargs,
) -> Dict[str, Any]:
    r"""
    Returns diagnostics of a fit using the original PSVI construction (Manousakas et al, 2020)

    Maintains `num_pseudo` learnable pseudo-points (u, z) with weights w, and
    alternates a Laplace fit of the model parameters with manual gradient
    steps on u and w.
    """
    random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
    us, zs, ws, core_idcs_opsvi, elbos_opsvi = [], [], [], [], []
    nlls_opsvi, accs_opsvi, idcs_opsvi, times_opsvi = [], [], [], [0]
    with torch.no_grad():
        # Uniform initial weights summing to N.
        w = N / num_pseudo * (torch.ones(num_pseudo).clone().detach())
        w.requires_grad_(
            requires_grad=True,
        )  # coreset weights
    # model params prior
    mu0, sigma0 = (
        torch.zeros(D + 1),
        torch.ones(D + 1),
    )
    theta0 = Normal(mu0, sigma0).rsample()
    theta = torch.nn.Parameter(theta0, requires_grad=True)
    # Bias column of ones appended for the logistic-regression intercept.
    x_aug, x_test_aug = torch.cat((x, torch.ones(x.shape[0], 1)), dim=1), torch.cat(
        (xt, torch.ones(xt.shape[0], 1)), dim=1
    )
    # initialization of pseudodata
    with torch.no_grad():
        # pseudo_rand_init / pseudo_subsample_init come from the
        # psvi.inference.utils star import.
        u, z = (
            pseudo_rand_init(x, y, num_pseudo=num_pseudo, seed=seed)
            if init_args == "random"
            else pseudo_subsample_init(x, y, num_pseudo=num_pseudo, seed=seed)
        )
    u, z = (
        torch.cat((u, torch.ones(u.shape[0], 1)), dim=1)
        .clone()
        .detach()
        .requires_grad_(True)
    ).float(), z.float()
    optim_net = torch.optim.Adam([theta], lr0net)
    optim_u = torch.optim.Adam([u], lr0u)
    optim_w = torch.optim.Adam([w], lr0v * N)  # weight lr scaled by dataset size
    t_start = time.time()
    for it in tqdm(range(num_epochs)):
        # Evaluate predictive performance of current coreset posterior
        if it % log_every == 0:
            param_samples = (
                # NOTE(review): `sml` is not defined in this module — presumably
                # exported by psvi.inference.utils via the star import; confirm.
                mcmc_sample(sml, list(range(num_pseudo)), u[:, :-1], z, w)
                if mcmc
                else run_laplace(
                    theta,
                    mu0,
                    sigma0,
                    u,
                    z,
                    w.detach(),
                    torch.optim.Adam([theta], lr0net),
                    inner_it=inner_it,
                    diagonal=True,
                    mc_samples=mc_samples,
                    seed=seed,
                )
            )
            times_opsvi.append(times_opsvi[-1] + time.time() - t_start)
            test_probs = logreg_forward(param_samples, x_test_aug)
            test_acc = test_probs.gt(0.5).float().eq(yt).float().mean()
            test_nll = -dist.Bernoulli(probs=test_probs).log_prob(yt).mean()
            core_idcs_opsvi.append(num_pseudo)
            nlls_opsvi.append(test_nll.item())
            accs_opsvi.append(test_acc.item())
            idcs_opsvi.append(num_pseudo)
            print(f"predictive accuracy: {(100*test_acc.item()):.2f}%")
            us.append(u.detach().numpy())
            zs.append(z.detach().numpy())
            ws.append(w.detach().numpy())
        # 1. Compute current coreset posterior using Laplace approximation on coreset points
        x_core, y_core = u, z
        # Sample for updated weights and take projected gradient descent steps on the weights
        optim_net = torch.optim.Adam([theta], lr0net)
        for in_it in range(
            inner_it
        ):  # inner loop for Laplace approximation of current coreset iterate
            # negative log-joint
            optim_net.zero_grad()
            ll, prior = model(theta, mu0, sigma0, x_core, y_core, single=True)
            loss = -w.dot(ll) - prior
            loss.backward()
            if register_elbos and in_it % log_every == 0:
                with torch.no_grad():
                    elbos_opsvi.append((1, -loss.item()))
            optim_net.step()
        optim_w.zero_grad()
        optim_u.zero_grad()
        with torch.no_grad():
            # samples from coreset iterate
            prec = laplace_precision(x_core, theta, w, diagonal=diagonal)
            laplace_approx = (
                dist.MultivariateNormal(theta, precision_matrix=prec)
                if not diagonal
                else Normal(theta, prec**-0.5)
            )
            param_samples = laplace_approx.rsample((mc_samples,)).squeeze()
        sub_idcs, sum_scaling = (
            np.random.randint(x_aug.shape[0], size=data_minibatch),
            x.shape[0] / data_minibatch,
        )  # sample minibatch when accessing full data and rescale corresponding log-likelihood
        # compute w_grad and u_grad
        ll_all, _ = model(
            param_samples,
            mu0,
            sigma0,
            torch.cat((x_aug[sub_idcs, :], x_core)),
            torch.cat((y[sub_idcs], y_core)),
        )
        ll_data, ll_core = (
            ll_all[: len(sub_idcs), :],
            ll_all[len(sub_idcs) :, :],
        )
        # Center per-point log-likelihoods across samples.
        cll_data, cll_core = (
            ll_data - ll_data.mean(axis=1).repeat(ll_data.shape[1], 1).T,
            ll_core - ll_core.mean(axis=1).repeat(ll_core.shape[1], 1).T,
        )
        resid = sum_scaling * cll_data.sum(axis=0) - w.matmul(cll_core)
        # Hand-computed Monte Carlo gradients written directly into .grad
        # (both buffers exist after loss.backward() above).
        w.grad.data = -cll_core.matmul(resid) / cll_core.shape[1]
        u_function = (
            torch.matmul(torch.einsum("m,ms->s", -w.detach(), cll_core), resid.detach())
            / cll_core.shape[1]
        )
        u.grad.data = torch.autograd.grad(u_function, u)[0]
        u.grad.data[:, -1] = 0  # zero gradient on the last column
        optim_w.step()
        optim_u.step()
        with torch.no_grad():
            torch.clamp_(w, 0)  # project weights onto the nonnegative orthant
    # store results
    results = {
        "accs": accs_opsvi,
        "nlls": nlls_opsvi,
        "csizes": core_idcs_opsvi,
        "times": times_opsvi[1:],
        "elbos": elbos_opsvi,
    }
    if log_pseudodata:
        results["us"], results["zs"], results["vs"] = us, zs, ws
    return results
def run_mfvi(
    xt=None,
    yt=None,
    mc_samples=4,
    data_minibatch=128,
    num_epochs=100,
    log_every=10,
    N=None,
    D=None,
    lr0net=1e-3,  # initial learning rate for optimizer
    mul_fact=2,  # multiplicative factor for total number of gradient iterations in classical vi methods
    seed=0,
    distr_fn=categorical_fn,
    architecture=None,
    n_hidden=None,
    nc=2,
    log_pseudodata=False,
    train_dataset=None,
    test_dataset=None,
    init_sd=None,
    **kwargs,
) -> Dict[str, Any]:
    r"""
    Returns diagnostics using a mean-field VI fit on the full training dataset. Implementation supporting pytorch dataloaders
    (To be used only in the BNN experiment flows)
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
    nlls_mfvi, accs_mfvi, times_mfvi, elbos_mfvi, grid_preds = [], [], [0], [], []
    t_start = time.time()
    net = set_up_model(
        architecture=architecture, D=D, n_hidden=n_hidden, nc=nc, mc_samples=mc_samples, init_sd=init_sd,
    ).to(device)
    train_loader = DataLoader(
        train_dataset,
        batch_size=data_minibatch,
        pin_memory=True,
        shuffle=True,
    )
    n_train = len(train_loader.dataset)
    test_loader = DataLoader(
        test_dataset,
        batch_size=data_minibatch,
        pin_memory=True,
        shuffle=True,
    )
    optim_vi = torch.optim.Adam(net.parameters(), lr0net)
    total_iterations = mul_fact * num_epochs
    checkpts = list(range(mul_fact * num_epochs))[::log_every]
    # Iterations at which decision-boundary grid predictions are stored
    # (start, middle, end of training).
    lpit = [checkpts[idx] for idx in [0, len(checkpts) // 2, -1]]
    for i in tqdm(range(total_iterations)):
        # NOTE(review): a fresh iterator is built per step, so each step takes
        # the first batch of a reshuffled epoch rather than sweeping epochs.
        xbatch, ybatch = next(iter(train_loader))
        xbatch, ybatch = xbatch.to(device, non_blocking=True), ybatch.to(
            device, non_blocking=True
        )
        optim_vi.zero_grad()
        # Minibatch NLL rescaled to the full dataset size.
        data_nll = -(
            n_train
            / xbatch.shape[0]
            * distr_fn(logits=net(xbatch).squeeze(-1)).log_prob(ybatch).sum()
        )
        # KL of all variational layers against their priors.
        kl = sum(m.kl() for m in net.modules() if isinstance(m, VILinear))
        mfvi_loss = data_nll + kl  # negative ELBO
        mfvi_loss.backward()
        optim_vi.step()
        with torch.no_grad():
            elbos_mfvi.append(-mfvi_loss.item())
        if i % log_every == 0 or i == total_iterations -1:
            total, test_nll, corrects = 0, 0, 0
            for xt, yt in test_loader:
                xt, yt = xt.to(device, non_blocking=True), yt.to(
                    device, non_blocking=True
                )
                with torch.no_grad():
                    # Average logits over the mc_samples dimension.
                    test_logits = net(xt).squeeze(-1).mean(0)
                    corrects += test_logits.argmax(-1).float().eq(yt).float().sum()
                    total += yt.size(0)
                    test_nll += -distr_fn(logits=test_logits).log_prob(yt).sum()
            if log_pseudodata and i in lpit:
                grid_preds.append(pred_on_grid(net, device=device).detach().cpu().numpy().T)
            times_mfvi.append(times_mfvi[-1] + time.time() - t_start)
            nlls_mfvi.append((test_nll / float(total)).item())
            accs_mfvi.append((corrects / float(total)).item())
            print(f"predictive accuracy: {(100*accs_mfvi[-1]):.2f}%")
    # store results
    results = {
        "accs": accs_mfvi,
        "nlls": nlls_mfvi,
        "times": times_mfvi[1:],
        "elbos": elbos_mfvi,
        "csizes": None,
    }
    if log_pseudodata:
        results["grid_preds"] = grid_preds
    return results
def run_mfvi_subset(
    x=None,
    y=None,
    xt=None,
    yt=None,
    mc_samples=4,
    data_minibatch=128,
    num_epochs=100,
    log_every=10,
    D=None,
    lr0net=1e-3,  # initial learning rate for optimizer
    mul_fact=2,  # multiplicative factor for total number of gradient iterations in classical vi methods
    seed=0,
    distr_fn=categorical_fn,
    log_pseudodata=False,
    train_dataset=None,
    test_dataset=None,
    num_pseudo=100,  # constrain on random subset with size equal to the max coreset size in the experiment
    init_args="subsample",
    architecture=None,
    n_hidden=None,
    nc=2,
    dnm=None,
    init_sd=None,
    **kwargs,
) -> Dict[str, Any]:
    r"""
    Returns diagnostics using a mean-field VI fit on a random subset of the training dataset with specified size. Implementation supporting pytorch dataloaders
    (To be used only in the BNN experiment flows)
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
    nlls_mfvi, accs_mfvi, times_mfvi, elbos_mfvi, grid_preds = [], [], [0], [], []
    t_start = time.time()
    net = set_up_model(
        architecture=architecture, D=D, n_hidden=n_hidden, nc=nc, mc_samples=mc_samples, init_sd=init_sd,
    ).to(device)
    if dnm=="MNIST":
        # Image data: draw a class-balanced subset directly from the dataset,
        # preserving its transforms.
        train_loader = DataLoader(
            train_dataset,
            batch_size=data_minibatch,
            # pin_memory=True,
            shuffle=True,
        )
        n_train = len(train_loader.dataset)
        points_per_class = [num_pseudo // nc] * nc  # split equally among classes
        # Last class absorbs the remainder so counts sum to num_pseudo.
        points_per_class[-1] = num_pseudo - sum(points_per_class[:-1])
        ybatch = (
            torch.tensor(
                [
                    item
                    for sublist in [[i] * ppc for i, ppc in enumerate(points_per_class)]
                    for item in sublist
                ]
            )
            .float()
            .to(device, non_blocking=True)
        )
        def get_x_from_label(ipc, _l):
            # Loader over only the examples with label `_l`, batch size `ipc`.
            indices = (
                torch.as_tensor(train_dataset.targets).clone().detach() == _l
            ).nonzero()
            return torch.utils.data.DataLoader(
                SubsetPreservingTransforms(train_dataset, indices=indices, dnm=dnm),
                batch_size=ipc,
                shuffle=True,
            )
        distilled_lst = []
        for c in range(nc):
            u0 = next(iter(get_x_from_label(points_per_class[c], c)))
            distilled_lst.append(u0.to(device=device, non_blocking=True))
        xbatch = torch.cat(distilled_lst).to(device, non_blocking=True)
    else:
        # Tabular data: initialize the fixed subset from raw (x, y).
        xbatch, ybatch = (
            pseudo_rand_init(x, y, num_pseudo=num_pseudo, seed=seed, nc=nc)
            if init_args == "random"
            else pseudo_subsample_init(x, y, num_pseudo=num_pseudo, seed=seed, nc=nc)
        )
        n_train = len(train_dataset)
    test_loader = DataLoader(
        test_dataset,
        batch_size=data_minibatch,
        pin_memory=True,
        shuffle=True,
    )
    optim_vi = torch.optim.Adam(net.parameters(), lr0net)
    # The fixed subset stands in for the full dataset, hence the likelihood rescaling.
    sum_scaling = n_train / num_pseudo
    total_iterations = mul_fact * num_epochs
    checkpts = list(range(mul_fact * num_epochs))[::log_every]
    # Iterations at which grid predictions are stored (start, middle, end).
    lpit = [checkpts[idx] for idx in [0, len(checkpts) // 2, -1]]
    for i in tqdm(range(total_iterations)):
        # Same fixed batch every iteration — only the subset is ever trained on.
        xbatch, ybatch = xbatch.to(device), ybatch.to(device)
        optim_vi.zero_grad()
        data_nll = (
            -sum_scaling
            * distr_fn(logits=net(xbatch).squeeze(-1)).log_prob(ybatch).sum()
        )
        kl = sum(m.kl() for m in net.modules() if isinstance(m, VILinear))
        mfvi_loss = data_nll + kl  # negative ELBO
        mfvi_loss.backward()
        optim_vi.step()
        with torch.no_grad():
            elbos_mfvi.append(-mfvi_loss.item())
        if i % log_every == 0:
            total, test_nll, corrects = 0, 0, 0
            for xt, yt in test_loader:
                xt, yt = xt.to(device, non_blocking=True), yt.to(
                    device, non_blocking=True
                )
                with torch.no_grad():
                    # Average logits over the mc_samples dimension.
                    test_logits = net(xt).squeeze(-1).mean(0)
                    corrects += test_logits.argmax(-1).float().eq(yt).float().sum()
                    total += yt.size(0)
                    test_nll += -distr_fn(logits=test_logits).log_prob(yt).sum()
            if log_pseudodata and i in lpit:
                grid_preds.append(pred_on_grid(net, device=
                device).detach().cpu().numpy().T)
            times_mfvi.append(times_mfvi[-1] + time.time() - t_start)
            nlls_mfvi.append((test_nll / float(total)).item())
            accs_mfvi.append((corrects / float(total)).item())
            print(f"predictive accuracy: {(100*accs_mfvi[-1]):.2f}%")
    # store results
    results = {
        "accs": accs_mfvi,
        "nlls": nlls_mfvi,
        "times": times_mfvi[1:],
        "elbos": elbos_mfvi,
        "csizes": [num_pseudo] * (mul_fact * num_epochs),
    }
    if log_pseudodata:
        results["grid_preds"] = grid_preds
        results["us"], results["zs"], results["vs"] = xbatch.detach(), ybatch.detach(), [sum_scaling]*num_pseudo
    return results
# MFVI for BNN regression
def run_mfvi_regressor(
    mc_samples=4,
    data_minibatch=128,
    num_epochs=100,
    log_every=10,
    D=None,
    lr0net=1e-3,  # initial learning rate for optimizer
    seed=0,
    architecture=None,
    n_hidden=None,
    train_dataset=None,
    val_dataset=None,
    test_dataset=None,
    nc=1,
    y_mean=None,
    y_std=None,
    taus=None,
    init_sd=1e-6,
    model_selection = True,
    dnm=None,
    **kwargs,
) -> Dict[str, Any]:
    r"""
    Mean-field VI baseline for BNN regression: optionally selects the
    likelihood precision `tau` on the validation set, then fits on the
    training set and returns test diagnostics.

    NOTE(review): relies on `fit` (and `update_hyperparams_dict`) coming from
    the psvi imports above — confirm their contracts against psvi.inference.utils
    and psvi.experiments.experiments_utils.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
    # normalized x train, normalized targets
    train_loader = DataLoader(
        train_dataset,
        batch_size=data_minibatch,
        # pin_memory=True,
        shuffle=False,
    )
    # normalized x test, unnormalized targets
    test_loader, val_loader, n_train = (
        DataLoader(
            test_dataset,
            batch_size=data_minibatch,
            # pin_memory=True,
            shuffle=False,
        ),
        DataLoader(
            val_dataset,
            batch_size=data_minibatch,
            # pin_memory=True,
            shuffle=False,
        ),
        len(train_loader.dataset),
    )
    bpe = max(1, int(n_train / data_minibatch))  # batches per epoch
    def revert_norm(y_pred):
        # Map normalized predictions back to the original target scale.
        return y_pred * y_std + y_mean
    best_tau, best_ll = taus[0], -float("inf")
    if model_selection:
        # model selection: pick the tau with best validation log-likelihood,
        # retraining a fresh net for each candidate.
        print("\nOptimizing precision hyperparameter")
        for tau in taus:
            print(f"\n\nTrying tau = {tau}")
            net = set_up_model(
                architecture=architecture,
                D=D,
                n_hidden=n_hidden,
                nc=nc,
                mc_samples=mc_samples,
                init_sd=init_sd,
            ).to(device)
            optim_vi = torch.optim.Adam(net.parameters(), lr0net)
            tau_fit = fit(
                net=net,
                optim_vi=optim_vi,
                train_loader=train_loader,
                pred_loader=val_loader,
                revert_norm=revert_norm,
                log_every=-1,  # log only at the end during model selection
                tau=tau,
                epochs=num_epochs * bpe,
                device=device,
            )
            if tau_fit["lls"][-1] > best_ll:
                best_tau, best_ll = tau, tau_fit["lls"][-1]
            print(f"current best tau, best ll : {best_tau}, {best_ll}")
    else:
        best_tau = taus[0]
    print(f"\n\nselected tau : {best_tau}\n\n")
    # Persist the selected precision for this dataset.
    update_hyperparams_dict(dnm, best_tau)
    # Final fit with a fresh network, evaluated on the test set.
    net = set_up_model(
        architecture=architecture,
        D=D,
        n_hidden=n_hidden,
        nc=nc,
        mc_samples=mc_samples,
        init_sd=init_sd,
    ).to(device)
    optim_vi = torch.optim.Adam(net.parameters(), lr0net)
    results = fit(
        net=net,
        optim_vi=optim_vi,
        train_loader=train_loader,
        pred_loader=test_loader,
        revert_norm=revert_norm,
        log_every=log_every,
        tau=best_tau,
        epochs=num_epochs * bpe,
        device=device,
    )
    return results
# MFVI Subset for BNN regression
def run_mfvi_subset_regressor(
    mc_samples=4,
    data_minibatch=128,
    num_epochs=100,
    log_every=10,
    D=None,
    lr0net=1e-3,  # initial learning rate for optimizer
    seed=0,
    architecture=None,
    n_hidden=None,
    train_dataset=None,
    val_dataset=None,
    test_dataset=None,
    nc=1,
    y_mean=None,
    y_std=None,
    init_sd=1e-6,
    num_pseudo=100,  # constrain on random subset with size equal to the max coreset size in the experiment
    taus=None,
    model_selection=False,
    **kwargs,
) -> Dict[str, Any]:
    r"""Train a mean-field VI BNN regressor constrained to a random data subset.

    Identical to :func:`run_mfvi_regressor` except that training only ever
    sees a fixed random subset of ``num_pseudo`` points (a "random coreset"
    baseline). Returns the :func:`fit` results dict with an extra
    ``csizes`` entry recording the subset size.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # fixed random subset standing in for a coreset; one batch covers it all
    sample_idcs = random.sample(range(len(train_dataset)), num_pseudo)
    subset_train_loader = DataLoader(
        torch.utils.data.Subset(train_dataset, sample_idcs),
        batch_size=num_pseudo,
        shuffle=False,
    )
    # normalized x test/val, unnormalized targets
    test_loader = DataLoader(test_dataset, batch_size=data_minibatch, shuffle=False)
    val_loader = DataLoader(val_dataset, batch_size=data_minibatch, shuffle=False)
    n_train = len(train_dataset)
    bpe = max(1, int(n_train / data_minibatch))  # batches per epoch

    def revert_norm(y_pred):
        # map normalized predictions back to the original target scale
        return y_pred * y_std + y_mean

    def _fresh_fit(tau, pred_loader, log_interval):
        # fresh model trained on the subset for a given precision tau
        net = set_up_model(
            architecture=architecture,
            D=D,
            n_hidden=n_hidden,
            nc=nc,
            mc_samples=mc_samples,
            init_sd=init_sd,
        ).to(device)
        return fit(
            net=net,
            optim_vi=torch.optim.Adam(net.parameters(), lr0net),
            train_loader=subset_train_loader,
            pred_loader=pred_loader,
            revert_norm=revert_norm,
            log_every=log_interval,
            tau=tau,
            epochs=num_epochs * bpe,
            device=device,
        )

    best_tau, best_ll = taus[0], -float("inf")
    if model_selection:
        # model selection: keep the tau with the best final validation ll
        print("\nOptimizing precision hyperparameter")
        for tau in taus:
            print(f"\n\nTrying tau = {tau}")
            ll = _fresh_fit(tau, val_loader, -1)["lls"][-1]
            if ll > best_ll:
                best_tau, best_ll = tau, ll
                print(f"current best tau, best ll : {best_tau}, {best_ll}")
    else:
        best_tau = taus[0]
    print(f"\n\nselected tau : {best_tau}\n\n")
    results = _fresh_fit(best_tau, test_loader, log_every)
    results["csizes"] = [num_pseudo]
    return results
# fit mean-field BNN using the standard ELBO and log predictive performance
def fit(
    net=None,  # variational BNN (stack of VILinear layers)
    optim_vi=None,  # optimizer over the variational parameters
    train_loader=None,  # normalized inputs and normalized targets
    pred_loader=None,  # evaluation split (unnormalized targets)
    revert_norm=None,  # maps normalized predictions back to target scale
    log_every=-1,  # evaluate every `log_every` steps; -1 -> only last step
    tau=1e-2,  # observation precision; likelihood scale is 1/sqrt(tau)
    epochs=40,  # number of gradient steps (callers pass num_epochs * bpe)
    device=None,
):
    r"""Train ``net`` by maximizing the standard mean-field ELBO.

    Periodically evaluates predictive RMSE and average log-likelihood on
    ``pred_loader`` and returns a dict with keys ``rmses``, ``lls``,
    ``times``, ``elbos`` and ``scale`` (the likelihood std ``1/sqrt(tau)``).
    """
    # Gaussian likelihood with fixed scale derived from the precision tau
    distr_fn = partial(gaussian_fn, scale=1.0 / np.sqrt(tau))
    logging_checkpoint = (
        lambda it: (it % log_every) == 0 if log_every > 0 else it == (epochs - 1)
    )  # if log_every==-1 then log pred perf only at the end of training
    lls, rmses, times, elbos = [], [], [0], []
    t_start = time.time()
    n_train = len(train_loader.dataset)
    for e in tqdm(range(epochs)):
        # NOTE(review): a fresh iterator is created every step, so with a
        # shuffle=False loader this always yields the *first* minibatch;
        # confirm whether training on a single fixed batch is intended.
        xbatch, ybatch = next(iter(train_loader))
        xbatch, ybatch = xbatch.to(device, non_blocking=True), ybatch.to(
            device, non_blocking=True
        )
        optim_vi.zero_grad()
        # negative expected log-likelihood, rescaled to the full dataset size
        data_nll = -(
            n_train
            / xbatch.shape[0]
            * distr_fn(net(xbatch).squeeze(-1)).log_prob(ybatch.squeeze()).sum()
        )
        # KL(q || prior) accumulated over all variational layers
        kl = sum(m.kl() for m in net.modules() if isinstance(m, VILinear))
        loss = data_nll + kl  # negative ELBO
        loss.backward()
        optim_vi.step()
        with torch.no_grad():
            elbos.append(-loss.item())
        if logging_checkpoint(e):
            total, test_ll, rmses_unnorm = 0, 0, 0
            for (xt, yt) in pred_loader:
                xt, yt = (
                    xt.to(device, non_blocking=True),
                    yt.to(device, non_blocking=True).squeeze(),
                )
                with torch.no_grad():
                    y_pred = net(xt).squeeze(-1)
                    # average MC samples after mapping back to target scale
                    y_pred = revert_norm(y_pred).mean(0).squeeze()
                    rmses_unnorm += (y_pred - yt).square().sum()
                    total += yt.size(0)
                    test_ll += distr_fn(y_pred).log_prob(yt.squeeze()).sum()
            times.append(times[-1] + time.time() - t_start)
            lls.append((test_ll / float(total)).item())
            rmses.append((rmses_unnorm / float(total)).sqrt().item())
            print(
                f" \n\n\n Predictive rmse {rmses[-1]:.2f} | pred ll {lls[-1]:.2f}"
            )
    # store results
    results = {
        "rmses": rmses,
        "lls": lls,
        "times": times[1:],
        "elbos": elbos,
        "scale": 1.0 / np.sqrt(tau),
    }
    return results
|
Blackbox-Coresets-VI-main
|
psvi/inference/baselines.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
Blackbox-Coresets-VI-main
|
psvi/inference/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
r"""
Black-box PSVI parent and children classes accessing the dataset via pytorch dataloaders.
"""
import time
import random
import numpy as np
from sklearn.utils import shuffle
import torch
import torch.nn as nn
from PIL import Image
from psvi.experiments.experiments_utils import SynthDataset
from psvi.hypergrad.diff_optimizers import DifferentiableAdam, GradientDescent
from psvi.hypergrad.hypergradients import CG_normaleq, fixed_point
from psvi.models.neural_net import (
set_mc_samples,
categorical_fn,
gaussian_fn,
make_fcnet,
make_fc2net,
make_lenet,
make_alexnet,
make_regressor_net,
VILinear,
VILinearMultivariateNormal,
)
from psvi.robust_higher import innerloop_ctx
from psvi.robust_higher.patch import monkeypatch
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from functools import partial
from psvi.inference.utils import make_dataloader, compute_empirical_mean
class SubsetPreservingTransforms(Dataset):
    r"""
    Subset of a dataset at specified indices that re-applies the parent
    dataset's transform to the raw items.

    Arguments:
        dataset (Dataset): the whole dataset
        indices (sequence): indices in the whole set selected for the subset
        dim (int): flattened feature dimensionality for non-image datasets
        dnm (str): dataset name; only "MNIST" and "Cifar10" are image-typed
    """

    def __init__(self, dataset, indices=None, dim=2, dnm="Cifar10"):
        self.dataset, self.indices = dataset, indices
        self.dnm, self.dim = dnm, dim

    def __getitem__(self, idx):
        raw = self.dataset.data[self.indices[idx]]
        # non-image datasets: hand back the flat feature vector directly
        if self.dnm not in {"MNIST", "Cifar10"}:
            return raw.reshape((self.dim,))
        if self.dnm == "MNIST":
            # grayscale 28x28 image stored flat as a tensor
            im = Image.fromarray(np.reshape(raw.numpy(), (28, 28)), mode="L")
        else:  # Cifar10 -- TBC: supporting only Cifar10 and MNIST
            im = Image.fromarray(raw)
        return self.dataset.transform(im)

    def __len__(self):
        return len(self.indices)
class PSVI(object):
r"""
PSVI
- with fixed rescaled coefficients on pseudodata supporting pytorch dataloaders
"""
def __init__(
    self,
    u=None,  # pseudo x-coordinates
    z=None,  # pseudo y-coordinates
    train_dataset=None,  # true training data
    test_dataset=None,  # test data
    N=None,  # size of training data
    D=None,  # dimensionality of training data
    model=None,  # statistical model
    optim=None,  # joint variational model/pseudodata optimizer
    optim_u=None,  # optimizer for pseudodata
    optim_net=None,  # optimizer for variational model parameters
    optim_v=None,  # optimizer for log-likelihood rescaling vector
    optim_z=None,  # optimizer for soft labels on distilled data
    register_elbos=True,  # register values of objectives over inference
    num_pseudo=None,  # number of pseudodata
    seed=0,  # random seed for instantiation of the method (for reproducibility)
    compute_weights_entropy=True,  # compute the entropy of weights distribution used in importance sampling
    mc_samples=None,  # number of MC samples for computation of variational objectives and predictions on unseen data
    reset=False,  # reset variational parameters to initialization
    reset_interval=10,  # number of outer gradient steps between reinitializations
    learn_v=False,  # boolean indicating if the v vector is learnable
    f=lambda *x: x[0],  # transformation applied on the v vector
    distr_fn=categorical_fn,  # distribution of last nn layer
    dnm="MNIST",  # dataset name
    nc=10,  # number of classes (argument supported only for the psvi dataloader subclasses)
    init_dataset=None,  # populated when picking initializations from a disturbed version of the original datapoints
    parameterised=False,
    learn_z=False,  # optimize in the label space
    prune=False,  # apply prunning over coreset training
    prune_interval=None,  # corresponding number of outer updates for prunning
    prune_sizes=None,  # list with budgets for pruned coreset
    increment=False,  # incremental learning setting
    increment_interval=None,  # corresponding number of outer updates between incrementing with new learning task
    increment_sizes=None,  # list of increasing coreset sizes after incrementally introducing new learning tasks
    lr0alpha=1e-3,
    retrain_on_coreset=False,  # retrain variational parameters only on coreset datapoints after extracting a coreset using joint optimizer on the PSVI ELBO
    device_id=None,
    **kwargs,
):
    r"""Store experiment configuration and initialize PSVI state.

    Seeds the RNGs, selects the compute device, and initializes the
    coreset log-likelihood rescaling vector ``v`` to uniform weights.
    """
    np.random.seed(seed), torch.manual_seed(seed)  # reproducibility
    # pick an explicit GPU id if given, else any available GPU, else CPU
    self.device = torch.device(
        f"cuda:{device_id}"
        if device_id
        else ("cuda" if torch.cuda.is_available() else "cpu")
    )
    self.u, self.z = u, z
    self.train_dataset, self.test_dataset = (
        train_dataset,
        test_dataset,
    )
    self.N, self.D, self.dnm = N, D, dnm
    self.nc = nc  # number of classes
    self.distr_fn = distr_fn
    (
        self.model,
        self.optim,
        self.optim_u,
        self.optim_net,
        self.optim_v,
        self.optim_z,
    ) = (
        model,
        optim,
        optim_u,
        optim_net,
        optim_v,
        optim_z,
    )
    self.register_elbos, self.compute_weights_entropy = (
        register_elbos,
        compute_weights_entropy,
    )
    self.elbos = []
    # in the incremental setting the coreset starts at the first budget
    self.num_pseudo, self.mc_samples = (
        num_pseudo if not increment else increment_sizes[0],
        mc_samples,
    )
    self.reset, self.reset_interval, self.learn_v, self.learn_z = (
        reset,
        reset_interval,
        learn_v,
        learn_z,
    )
    with torch.no_grad():
        self.v = (
            1.0 / self.num_pseudo * torch.ones(self.num_pseudo, device=self.device)
        )
    self.v.requires_grad_(
        self.learn_v
    )  # initialize weights of coreset pseudodata to uniform and set to differentiable or not according to attribute learn_v
    self.f, self.parameterised = f, parameterised
    self.init_dataset = init_dataset
    self.results = {}
    self.prune, self.prune_interval, self.prune_sizes = (
        prune,
        prune_interval,
        prune_sizes,
    )
    self.increment, self.increment_interval, self.increment_sizes = (
        increment,
        increment_interval,
        increment_sizes,
    )
    if self.increment:
        self.historical_coresets = []
    self.lr0alpha = lr0alpha
    self.retrain_on_coreset = retrain_on_coreset
def pseudo_subsample_init(self):
    r"""
    Initialization of pseudodata on random data subset with equal number of
    datapoints from each class.

    Sets ``self.z`` (pseudolabels, or differentiable soft labels when
    ``learn_z``) and ``self.u`` (pseudo-inputs drawn per class from the
    chosen dataset).
    """
    chosen_dataset = (
        self.train_dataset if self.init_dataset is None else self.init_dataset
    )
    # set up pseudodata by initializing to random subset from the existing dataset
    points_per_class = [
        self.num_pseudo // self.nc
    ] * self.nc  # split equally among classes
    points_per_class[-1] = self.num_pseudo - sum(
        points_per_class[:-1]
    )  # assigning the remainder to the last class
    with torch.no_grad():
        # labels: [0]*ppc[0] + [1]*ppc[1] + ... flattened into one tensor
        self.z = (
            torch.tensor(
                [
                    item
                    for sublist in [
                        [i] * ppc for i, ppc in enumerate(points_per_class)
                    ]
                    for item in sublist
                ]
            )
            .float()
            .to(self.device, non_blocking=True)
        )
    if self.learn_z:
        self.z = torch.nn.functional.one_hot(
            self.z.to(torch.int64),
            num_classes=self.nc,
        ).float()  # initialize target logits close to one-hot-encoding [0,..., class, ..., 0]-vectors
        self.z.requires_grad_(True)

    def get_x_from_label(ipc, _l):
        # loader yielding a random batch of `ipc` datapoints with label `_l`
        indices = (
            torch.as_tensor(chosen_dataset.targets).clone().detach() == _l
        ).nonzero()
        return torch.utils.data.DataLoader(
            SubsetPreservingTransforms(
                chosen_dataset,
                indices=indices,
                dnm=self.dnm,
                dim=self.D,
            ),
            batch_size=ipc,
            shuffle=True,
        )

    distilled_lst = []
    for c in range(self.nc):
        # one shuffled batch per class gives the class's pseudo-inputs
        u0 = next(iter(get_x_from_label(points_per_class[c], c)))
        distilled_lst.append(u0.to(device=self.device, non_blocking=True))
    self.u = torch.cat(distilled_lst).requires_grad_(True)
def pseudo_rand_init(self, variance=1.):
    r"""
    Initialize pseudo-inputs as noisy copies of the empirical data mean and
    pseudolabels split (almost) equally among the classes.
    """
    empirical_mean = compute_empirical_mean(self.train_loader)
    noise = variance * torch.randn(self.num_pseudo, self.D)
    self.u = (
        (empirical_mean + noise)
        .clone()
        .to(self.device)
        .requires_grad_(True)
    )
    base = self.num_pseudo // self.nc
    # each class gets `base` points; the last class absorbs the remainder
    counts = [base] * (self.nc - 1) + [self.num_pseudo - (self.nc - 1) * base]
    self.z = torch.cat(
        [c * torch.ones(cnt).to(self.device) for c, cnt in enumerate(counts)]
    )
def psvi_elbo(self, xbatch, ybatch, model=None, params=None, hyperopt=False):
    r"""
    PSVI objective computation [negative PSVI-ELBO].

    Runs one forward pass over pseudodata and the data minibatch jointly,
    then combines the per-MC-sample negative log-likelihoods via
    importance weights derived from the coreset posterior.
    """
    assert self.mc_samples > 1  # importance weighting needs multiple MC samples
    Nu, Nx = self.u.shape[0], xbatch.shape[0]
    # stack pseudodata before real data so columns [:Nu] are the coreset
    all_data, all_labels = torch.cat((self.u, xbatch)), torch.cat(
        (
            self.z,
            ybatch
            if not self.learn_z
            else self.nc
            * torch.nn.functional.one_hot(
                ybatch.to(torch.int64),
                num_classes=self.nc,
            ).float(),
        )
    )
    logits = model(all_data) if not hyperopt else model(all_data, params=params)
    log_probs = (nn.LogSoftmax(dim=-1)(logits)).permute(1, 2, 0)
    # per-sample negative log-likelihoods; KL form when learning soft labels
    all_nlls = (
        -self.distr_fn(logits=logits.squeeze(-1)).log_prob(all_labels)
        if not self.learn_z
        else torch.nn.KLDivLoss(reduction="none")(
            log_probs,
            all_labels.softmax(0).unsqueeze(-1).expand(log_probs.shape),
        )
        .sum(1)
        .T
    )
    # coreset nll rescaled by N * f(v): the coreset pretends to be the full dataset
    pseudo_nll = (
        all_nlls[:, :Nu].matmul(self.N * self.f(self.v, 0)) if Nu > 0 else 0.0
    )
    # minibatch nll rescaled to the full dataset size
    data_nll = self.N / Nx * all_nlls[:, Nu:].sum(-1)
    sampled_nkl = sum(
        m.sampled_nkl()
        for m in model.modules()
        if (isinstance(m, VILinear) or isinstance(m, VILinearMultivariateNormal))
    )
    # self-normalized importance weights over MC samples
    log_weights = -pseudo_nll + sampled_nkl
    weights = log_weights.softmax(0)
    return weights.mul(data_nll - pseudo_nll).sum() - log_weights.mean()
def inner_elbo(self, model=None, params=None, hyperopt=False):
    r"""
    Inner VI objective computation [negative ELBO].

    Standard mean-field negative ELBO evaluated on the coreset points only,
    with log-likelihoods rescaled by ``N * f(v)``.
    """
    logits = model(self.u) if not hyperopt else model(self.u, params=params)
    if len(logits.shape) == 2:
        # ensure an explicit MC-sample dimension
        logits.unsqueeze_(1)
    log_probs = (nn.LogSoftmax(dim=-1)(logits)).permute(1, 2, 0)
    pseudodata_nll = (
        -self.distr_fn(logits=logits.squeeze(-1)).log_prob(self.z)
        if not self.learn_z
        else torch.nn.KLDivLoss(reduction="none")(
            log_probs,
            self.z.softmax(0).unsqueeze(-1).expand(log_probs.shape),
        )
        .sum(1)
        .T
    ).matmul(self.N * self.f(self.v, 0))
    # KL(q || prior) over all variational layers
    kl = sum(
        m.kl()
        for m in model.modules()
        if (isinstance(m, VILinear) or isinstance(m, VILinearMultivariateNormal))
    )
    # with an empty coreset only the KL term remains
    return pseudodata_nll.sum() + kl if self.u.shape[0] > 0 else kl
r"""
Optimization methods
"""
def joint_step(self, xbatch, ybatch):
    r"""Single gradient step on the PSVI objective, updating model and
    pseudodata jointly through ``self.optim``."""
    self.optim.zero_grad()
    outer_loss = self.psvi_elbo(xbatch, ybatch, model=self.model)
    if self.register_elbos:
        with torch.no_grad():
            # tag 2 marks the joint objective in the elbo trace
            self.elbos.append((2, -outer_loss.item()))
    outer_loss.backward()
    self.optim.step()
    return outer_loss
def alternating_step(self, xbatch, ybatch):
    r"""Alternate one gradient step on the model parameters (phase 0) and
    one on the pseudodata coordinates (phase 1), both on the PSVI objective."""
    for phase, optimizer in enumerate((self.optim_net, self.optim_u)):
        self.optim = optimizer
        self.optim.zero_grad()
        loss = self.psvi_elbo(xbatch, ybatch, model=self.model)
        if self.register_elbos:
            with torch.no_grad():
                # the trace tag equals the phase index (0: net, 1: pseudodata)
                self.elbos.append((phase, -loss.item()))
        loss.backward()
        self.optim.step()
    return loss
def nested_step(self, xbatch, ybatch, truncated=False, K=5):
    r"""
    Bilevel step: differentiate the outer PSVI objective through an inner
    loop of variational updates (via ``innerloop_ctx``), then update the
    pseudodata/weights with the resulting hypergradients.

    When ``truncated``, only the last ``K`` inner steps are differentiated;
    the first ``inner_it - K`` steps run with a plain Adam optimizer.
    """
    self.optim_u.zero_grad()
    self.optim_net.zero_grad()
    if self.learn_v:
        self.optim_v.zero_grad()
    if self.learn_z:
        self.optim_z.zero_grad()
    if not truncated:
        # fully differentiable inner loop over a functional model copy
        with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
            for in_it in range(self.inner_it):
                mfvi_loss = self.inner_elbo(model=fmodel)
                with torch.no_grad():
                    if self.register_elbos and in_it % self.log_every == 0:
                        self.elbos.append((1, -mfvi_loss.item()))
                diffopt.step(mfvi_loss)
            psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
            with torch.no_grad():
                if self.register_elbos:
                    self.elbos.append((0, -psvi_loss.item()))
            psvi_loss.backward()
    else:
        # non-differentiable warm-up for the first inner_it - K steps
        inner_opt = torch.optim.Adam(list(self.model.parameters()), 1e-4)
        for in_it in range(self.inner_it - K):
            mfvi_loss = self.inner_elbo(model=self.model)
            with torch.no_grad():
                if self.register_elbos and in_it % self.log_every == 0:
                    self.elbos.append((1, -mfvi_loss.item()))
            # NOTE(review): no inner_opt.zero_grad() inside this loop, so
            # gradients accumulate across warm-up steps — confirm intended.
            mfvi_loss.backward()
            inner_opt.step()
        print('done non-differentiable part')
        inner_opt.zero_grad()
        # differentiate only through the final K inner steps
        with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
            for in_it in range(K):
                mfvi_loss = self.inner_elbo(model=fmodel)
                with torch.no_grad():
                    if self.register_elbos and in_it % self.log_every == 0:
                        self.elbos.append((1, -mfvi_loss.item()))
                diffopt.step(mfvi_loss)
            psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
            with torch.no_grad():
                if self.register_elbos:
                    self.elbos.append((0, -psvi_loss.item()))
            psvi_loss.backward()
    self.optim_u.step()
    if self.learn_v:
        self.optim_v.step()
        if not self.parameterised:
            with torch.no_grad():
                torch.clamp_(
                    self.v, min=0.0
                )  # clamp weights of coreset data point to be non-negative
    if self.scheduler_optim_net:
        self.scheduler_optim_net.step()
    if self.learn_z:
        self.optim_z.step()
    # copy the inner-loop solution back into the persistent model
    nn.utils.vector_to_parameters(
        nn.utils.parameters_to_vector(list(fmodel.parameters())),
        self.model.parameters(),
    )
    return psvi_loss
def hyper_step(
    self,
    xbatch,
    ybatch,
    T=50,  # iterations for inner problem solver
    inner_opt_class=DifferentiableAdam,  # optimizer type for inner problem solver
    K=30,  # iterations for linear system solver (in approximate implicit differentiation methods)
    linsys_lr=1e-4,  # lr for the SGD optimizer used to solve the linear system on the Jacobian-vector products
    hypergrad_approx="CG_normaleq",
    **kwargs,
):
    r"""
    Hypergradient step via approximate implicit differentiation: solve the
    inner VI problem, then estimate hypergradients w.r.t. the pseudodata
    (and ``v``) with fixed-point or CG-on-normal-equations solvers.

    Returns the outer PSVI loss value (a float).
    """
    T = self.inner_it  # the T argument is overridden by the configured inner_it
    inner_opt_kwargs = {"step_size": self.optim_net.param_groups[0]["lr"]}
    fmodel = monkeypatch(self.model, copy_initial_weights=True)
    self.optim_u.zero_grad()
    if self.learn_v:
        self.optim_v.zero_grad()
    if self.learn_z:
        raise NotImplementedError

    def inner_loop(hparams, params, optim, n_steps, create_graph=False):
        # unrolled inner optimization trajectory (history kept for AID)
        params_history = [optim.get_opt_params(params)]
        for _ in range(n_steps):
            params_history.append(
                optim(params_history[-1], hparams, create_graph=create_graph)
            )
        return params_history

    def get_inner_opt(train_loss):
        return inner_opt_class(train_loss, **inner_opt_kwargs)

    def inner_loss_function(p, hp, hyperopt=True):
        # binds the hyperparameters into self before evaluating the inner ELBO
        if self.learn_v:
            self.u, self.v = hp[0], hp[1]
        else:
            self.u = hp[0]
        return self.inner_elbo(model=fmodel, params=p, hyperopt=hyperopt)

    def outer_loss_function(p, hp):
        # binds the hyperparameters into self before evaluating the PSVI ELBO
        if self.learn_v:
            self.u, self.v = hp[0], hp[1]
        else:
            self.u = hp[0]
        return self.psvi_elbo(xbatch, ybatch, model=fmodel, params=p, hyperopt=True)

    inner_opt = get_inner_opt(inner_loss_function)
    params = [p.detach().clone().requires_grad_(True) for p in fmodel.parameters()]
    params_history = inner_loop(
        [self.u] + [self.v] if self.learn_v else [self.u],
        params,
        inner_opt,
        T,
    )
    last_param = params_history[-1][: len(params)]
    linear_opt = GradientDescent(loss_f=inner_loss_function, step_size=linsys_lr)
    if hypergrad_approx == "fixed_point":  # fixed-point AID
        fixed_point(
            last_param,
            [self.u] + [self.v] if self.learn_v else [self.u],
            K=K,
            fp_map=linear_opt,
            outer_loss=outer_loss_function,
            stochastic=True,
        )
    elif hypergrad_approx == "CG_normaleq":  # CG on normal equations AID
        CG_normaleq(
            last_param,
            [self.u] + [self.v] if self.learn_v else [self.u],
            K=K,
            fp_map=linear_opt,
            outer_loss=outer_loss_function,
            set_grad=True,
        )
    self.optim_u.step()
    if self.learn_v:
        self.optim_v.step()
        if not self.parameterised:
            with torch.no_grad():
                torch.clamp_(self.v, min=0.0)
    # NOTE(review): hp always includes self.v here, even when learn_v is
    # False (outer_loss_function then ignores hp[1]) — confirm intended.
    ll = outer_loss_function(last_param, [self.u] + [self.v])
    # copy the inner solution back into the persistent model
    nn.utils.vector_to_parameters(
        nn.utils.parameters_to_vector(last_param),
        self.model.parameters(),
    )
    return ll.item()
def set_up_model(self):
    r"""
    Instantiate the statistical model selected by the experiment attributes
    (``logistic_regression`` takes precedence over ``architecture``) and
    store it on ``self.model``, moved to the compute device.
    """
    print("SETTING UP THE MODEL \n\n")
    # variational-layer arguments shared by every architecture
    vi_kwargs = dict(mc_samples=self.mc_samples, init_sd=self.init_sd)
    if self.logistic_regression:
        self.model = nn.Sequential(
            VILinear(self.D, self.nc, **vi_kwargs),
        ).to(self.device)
        return
    arch = self.architecture
    if arch == "logistic_regression_fullcov":
        self.model = nn.Sequential(
            VILinearMultivariateNormal(self.D, self.nc, **vi_kwargs),
        ).to(self.device)
    elif arch in {"fn", "residual_fn"}:
        self.model = make_fcnet(
            self.D,
            self.n_hidden,
            self.nc,
            n_layers=self.n_layers,
            linear_class=VILinear,
            nonl_class=nn.ReLU,
            residual=(arch == "residual_fn"),
            **vi_kwargs,
        ).to(self.device)
    elif arch == "fn2":
        print(f"architecture : {self.architecture}")
        self.model = make_fc2net(
            self.D,
            self.n_hidden,
            self.nc,  # does not support argument on the number of channels
            linear_class=VILinearMultivariateNormal,
            nonl_class=nn.ReLU,
            **vi_kwargs,
        ).to(self.device)
    elif arch == "lenet":
        self.model = make_lenet(
            linear_class=VILinear,
            nonl_class=nn.ReLU,
            **vi_kwargs,
        ).to(self.device)
    elif arch == "alexnet":
        self.model = make_alexnet(
            linear_class=VILinear,
            nonl_class=nn.ReLU,
            **vi_kwargs,
        ).to(self.device)
    elif arch == "regressor_net":
        self.model = make_regressor_net(
            self.D,
            self.n_hidden,
            self.nc,
            linear_class=VILinear,
            nonl_class=nn.ReLU,
            residual=(arch == "residual_fn"),
            **vi_kwargs,
        ).to(self.device)
def run_psvi(
    self,
    init_args="subsample",  # pseudodata initialization scheme: "subsample" | "random"
    trainer="nested",  # optimization scheme: "nested" | "alternating" | "hyper" | "joint"
    n_layers=1,
    logistic_regression=True,
    n_hidden=None,
    architecture=None,
    log_every=10,  # evaluation period (outer steps)
    inner_it=10,  # inner-loop iterations for nested/hyper trainers
    data_minibatch=None,
    lr0net=1e-3,  # initial lr for the variational model parameters
    lr0u=1e-3,  # initial lr for the pseudo-inputs
    lr0joint=1e-3,  # lr for the joint trainer
    lr0v=1e-2,  # lr for the coreset weight vector
    lr0z=1e-2,  # lr for soft pseudolabels
    init_sd=1e-3,
    num_epochs=1000,
    log_pseudodata=False,
    prune_idx=0,
    increment_idx=0,
    gamma=1.0,  # StepLR decay factor for the net optimizer
    **kwargs,
):
    r"""
    Run PSVI inference: initialize pseudodata, then repeatedly evaluate,
    optionally reset/prune/increment the coreset, and take one outer
    optimization step per iteration. Returns the results dict.
    """
    # experiment-specific hyperparameters
    self.init_args = init_args
    self.trainer = trainer
    self.logistic_regression = logistic_regression
    self.architecture, self.n_hidden, self.n_layers, self.init_sd = (
        architecture,
        n_hidden,
        n_layers,
        init_sd,
    )
    self.log_every, self.log_pseudodata = log_every, log_pseudodata
    self.data_minibatch = data_minibatch
    self.inner_it, self.num_epochs = inner_it, num_epochs
    self.scheduler_optim_net = None
    self.gamma = gamma
    # decay the net lr every quarter epoch (disabled when dataset is tiny)
    epoch_quarter = (self.N // self.data_minibatch) // 4
    scheduler_kwargs = {
        "step_size": epoch_quarter if epoch_quarter > 0 else 10000,
        "gamma": self.gamma,
    }
    # load the training and test data on dataloaders
    self.train_loader = DataLoader(
        self.train_dataset,
        batch_size=self.data_minibatch,
        pin_memory=True,
        shuffle=True,
    )
    self.test_loader = DataLoader(
        self.test_dataset,
        batch_size=self.data_minibatch,
        pin_memory=True,
        shuffle=False,
    )
    # setup for training and test sets in incremental learning: we start with
    # 2 classes, and keep adding 1 new class at a time
    if self.increment:
        self.incremental_train_datasets, self.incremental_test_datasets = (
            [None] * (self.nc - 1),
            [None] * (self.nc - 1),
        )
        for c in range(1, self.nc):
            self.incremental_train_datasets[c - 1] = self.train_dataset.subset_where(
                cs=list(range(c + 1)) if c == 1 else [c]
            )
            self.incremental_test_datasets[c - 1] = self.test_dataset.subset_where(
                cs=list(range(c + 1))
            )
        (self.train_loader, self.test_loader) = (
            make_dataloader(self.incremental_train_datasets[0], self.data_minibatch),
            make_dataloader(
                self.incremental_test_datasets[0], self.data_minibatch, shuffle=False
            ),
        )
        self.train_data_so_far = len(self.train_loader.dataset)
        self.nc = 2  # in the incremental learning case start with a 2-class classification problem
    self.set_up_model()
    # initialization of results data structures
    (
        nlls_psvi,
        accs_psvi,
        core_idcs_psvi,
        iws_entropy,
        nesses,
        vs_entropy,
        us,
        zs,
        vs,
        grid_preds,
        times,
    ) = ([], [], [], [], [], [], [], [], [], [], [0])
    # initialization of pseudodata
    pseudodata_init = {
        "random": self.pseudo_rand_init,  # different transformations applied on `train_dataset`
        "subsample": self.pseudo_subsample_init,
    }
    pseudodata_init[self.init_args]()
    # optimization method
    self.optim_net, self.optim_u = (
        torch.optim.Adam(list(self.model.parameters()), lr0net),
        torch.optim.Adam([self.u], lr0u),
    )
    self.scheduler_optim_net = torch.optim.lr_scheduler.StepLR(
        self.optim_net, **scheduler_kwargs
    )
    if self.learn_v:
        self.optim_v = torch.optim.Adam([self.v], lr0v)
    if self.learn_z:
        self.optim_z = torch.optim.Adam([self.z], lr0z)
    optimizers = {
        "alternating": self.alternating_step,
        "nested": self.nested_step,
        "hyper": self.hyper_step,
    }
    if self.trainer == "joint":
        # single optimizer over model parameters, pseudodata and (optionally) v
        variational_params = (
            list(self.model.parameters()) + [self.u] + [self.v]
            if self.learn_v
            else list(self.model.parameters()) + [self.u]
        )
        self.optim = torch.optim.Adam(variational_params, lr0joint)
        psvi_step = self.joint_step
    else:
        psvi_step = optimizers[self.trainer]
    t_start = time.time()
    # training loop
    total_checkpts = list(range(self.num_epochs))[::log_every]
    downsample = 1  # downsample checkpoints for logging predictive uncertainty over a grid
    lpit = total_checkpts[::downsample]
    for it in tqdm(range(self.num_epochs)):
        xbatch, ybatch = next(iter(self.train_loader))
        xbatch, ybatch = xbatch.to(self.device, non_blocking=True), ybatch.to(
            self.device, non_blocking=True
        )
        # evaluation
        if it % self.log_every == 0:
            test_acc, test_nll, iw_ent, ness, v_ent = self.evaluate()
            if (
                self.log_pseudodata
                and it in lpit
                and self.dnm
                not in {"MNIST", "Cifar10", "adult", "phishing", "webspam"}
            ):
                print(f"\nlogging predictive grid at {it}")
                grid_preds.append(self.pred_on_grid().detach().cpu().numpy().T)
            with torch.no_grad():
                nlls_psvi.append(test_nll.item())
                accs_psvi.append(test_acc.item())
                print(f"\npredictive accuracy: {(100*test_acc.item()):.2f}%")
                core_idcs_psvi.append(self.num_pseudo)
                times.append(times[-1] + time.time() - t_start)
                vs.append((self.f(self.v, 0)).clone().cpu().detach().numpy())
                if iw_ent is not None:
                    iws_entropy.append(iw_ent.item())
                if ness is not None:
                    nesses.append(ness.item())
                if v_ent is not None:
                    vs_entropy.append(v_ent.item())
                if self.log_pseudodata:
                    us.append(self.u.clone().cpu().detach().numpy())
                    zs.append(self.z.clone().cpu().detach().numpy())
        # variational nn reinitialization
        if self.reset and it % self.reset_interval == 0:
            self.weight_reset()
        # take a single optimization step
        psvi_step(xbatch, ybatch)
        # prune coreset to smaller sizes
        if self.prune and it > 0 and it % self.prune_interval == 0:
            if prune_idx < len(self.prune_sizes):
                self.prune_coreset(
                    to_size=self.prune_sizes[prune_idx], lr0v=lr0v, lr0net=lr0net
                )
                prune_idx += 1
                self.weight_reset()  # reset model upon pruning
        # add new learning task and increment coreset to enable fitting it
        if self.increment and it > 0 and it % self.increment_interval == 0:
            if increment_idx < len(self.increment_sizes) - 1:
                increment_idx += 1
                # sample summarising data from tasks so far using coreset
                # points weighting
                samples_from_coreset = torch.multinomial(
                    self.f(self.v, 0), self.train_data_so_far, replacement=True
                )
                self.nc += 1  # added new class in training dataset
                self.set_up_model()  # reset model
                self.increment_coreset(
                    to_size=self.increment_sizes[increment_idx],
                    lr0v=lr0v,
                    lr0u=lr0u,
                    lr0net=lr0net,
                    new_class=increment_idx + 1,
                    increment_idx=increment_idx,
                )
                # augment with new training data
                self.train_loader = make_dataloader(
                    self.incremental_train_datasets[increment_idx].concatenate(
                        self.u[samples_from_coreset].clone().detach(),
                        self.z[samples_from_coreset].clone().detach(),
                    ),
                    self.data_minibatch,
                )
                # augment with new test data
                self.test_loader = make_dataloader(
                    self.incremental_test_datasets[increment_idx],
                    self.data_minibatch,
                    shuffle=False,
                )
                self.train_data_so_far = len(self.train_loader.dataset)
    # retrain restricting only on coreset datapoints
    if self.retrain_on_coreset:
        print("\n\nRetrain on the extracted coreset for the same number of epochs")
        self.weight_reset()
        self.optim_retrain = torch.optim.Adam(list(self.model.parameters()), lr0joint)
        for it in tqdm(range(self.num_epochs)):
            # evaluation
            if it % self.log_every == 0:
                test_acc, test_nll, iw_ent, ness, v_ent = self.evaluate(
                    correction=False
                )
                if (
                    self.log_pseudodata
                    and it in lpit
                    and self.dnm
                    not in {"MNIST", "Cifar10", "adult", "phishing", "webspam"}
                ):
                    print(f"\nlogging predictive grid at {it}")
                    grid_preds.append(
                        self.pred_on_grid(correction=False).detach().cpu().numpy().T
                    )
                with torch.no_grad():
                    nlls_psvi.append(test_nll.item())
                    accs_psvi.append(test_acc.item())
                    print(f"\npredictive accuracy: {(100*test_acc.item()):.2f}%")
                    core_idcs_psvi.append(self.num_pseudo)
                    times.append(times[-1] + time.time() - t_start)
                    vs.append((self.f(self.v, 0)).clone().cpu().detach().numpy())
                    if iw_ent is not None:
                        iws_entropy.append(iw_ent.item())
                    if ness is not None:
                        nesses.append(ness.item())
                    if v_ent is not None:
                        vs_entropy.append(v_ent.item())
                    if self.log_pseudodata:
                        us.append(self.u.clone().cpu().detach().numpy())
                        zs.append(self.z.clone().cpu().detach().numpy())
            self.optim_retrain.zero_grad()
            loss = self.inner_elbo(model=self.model)
            loss.backward()
            self.optim_retrain.step()
    # store results
    self.results["accs"] = accs_psvi
    self.results["nlls"] = nlls_psvi
    self.results["csizes"] = core_idcs_psvi
    self.results["times"] = times[1:]
    self.results["elbos"] = self.elbos
    self.results["went"] = iws_entropy
    self.results["ness"] = nesses
    self.results["vent"] = vs_entropy
    self.results["vs"] = vs
    if self.log_pseudodata:
        self.results["us"], self.results["zs"], self.results["grid_preds"] = (
            us,
            zs,
            grid_preds,
        )
    return self.results
## Compute predictive metrics
def evaluate(
    self,
    correction=True,  # importance-weight the MC samples using the coreset posterior
    **kwargs,
):
    r"""
    Evaluate predictive accuracy and negative log-likelihood on the test
    loader, plus diagnostics of the importance-weighting distribution.

    Returns ``(accuracy, mean_nll, iw_entropy, ness, v_entropy)``; the last
    three are ``None``-able diagnostics computed from the final batch.
    """
    assert self.mc_samples > 1  # importance weighting needs multiple MC samples
    total, test_nll, corrects = 0, 0, 0
    for xt, yt in self.test_loader:
        xt, yt = xt.to(self.device, non_blocking=True), yt.to(
            self.device, non_blocking=True
        )
        with torch.no_grad():
            # joint forward pass over coreset and test batch
            all_data = torch.cat((self.u, xt))
            all_logits = self.model(all_data)
            pseudo_logits = all_logits[:, : self.num_pseudo]
            log_probs = (nn.LogSoftmax(dim=-1)(pseudo_logits)).permute(1, 2, 0)
            # NOTE(review): this is log_prob *without* negation (unlike
            # psvi_elbo, which negates), yet it is named pseudo_nll and
            # used with the same sign below — confirm the intended sign.
            pseudo_nll = (
                (
                    (
                        self.distr_fn(logits=pseudo_logits).log_prob(self.z)
                        if not self.learn_z
                        else torch.nn.KLDivLoss(reduction="none")(
                            log_probs,
                            self.z.softmax(0).unsqueeze(-1).expand(log_probs.shape),
                        ).sum((1, 2))
                    ).matmul(self.N * self.f(self.v, 0))
                )
                if self.num_pseudo > 0
                else 0.0
            )
            test_data_logits = all_logits[:, self.num_pseudo :]
            sampled_nkl = sum(
                m.sampled_nkl()
                for m in self.model.modules()
                if (
                    isinstance(m, VILinear)
                    or isinstance(m, VILinearMultivariateNormal)
                )
            )
            # self-normalized importance weights over MC samples
            log_weights = -pseudo_nll + sampled_nkl
            weights = log_weights.softmax(0)
            # predictive probs: weighted (corrected) or plain MC average
            test_probs = (
                (
                    test_data_logits.softmax(-1)
                    .mul(weights.unsqueeze(-1).unsqueeze(-1))
                    .sum(0)
                )
                if correction
                else test_data_logits.softmax(-1).mean(0)
            )
            corrects += test_probs.argmax(-1).float().eq(yt).float().sum()
            total += yt.size(0)
            test_nll += -self.distr_fn(probs=test_probs).log_prob(yt).sum()
    # diagnostics below use the weights of the *last* test batch
    iw_entropy = (
        -weights[weights > 0].log().mul(weights[weights > 0]).sum()
        if self.compute_weights_entropy
        else None
    )  # entropy of the importance weighting distribution
    ness = (
        weights.sum().square() / weights.square().sum() / weights.shape[0]
    )  # normalized effective sample size
    vs = self.f(self.v, 0)
    v_entropy = (
        vs.sum().square()
        / vs.square().sum()
        / self.num_pseudo  # normalize entropy with coreset size
        if self.compute_weights_entropy
        else None
    )
    return (
        corrects / float(total),
        test_nll / float(total),
        iw_entropy,
        ness,
        v_entropy,
    )
    def weight_reset(self):
        r"""
        Reset variational parameters to initialization
        """
        for layer in self.model.modules():
            # Variational layers are restored via their dedicated variational
            # re-initialization when available.
            if (
                isinstance(layer, VILinear)
                or isinstance(layer, VILinearMultivariateNormal)
            ) and hasattr(layer, "reset_parameters_variational"):
                layer.reset_parameters_variational()
            # NOTE(review): `and` binds tighter than `or`, so a Conv2d layer
            # reaches reset_parameters() without the hasattr guard (nn.Conv2d
            # does define it, so this works) — confirm the precedence is
            # intentional before refactoring.
            elif (
                isinstance(layer, nn.Conv2d)
                or (
                    isinstance(layer, VILinear)
                    or isinstance(layer, VILinearMultivariateNormal)
                )
                and hasattr(layer, "reset_parameters")
            ):
                layer.reset_parameters()
    def pred_on_grid(
        self,
        n_test_per_dim=250,
        correction=True,
        **kwargs,
    ):
        r"""
        Predictions over a 2-d grid for visualization of predictive posterior
        on 2-d synthetic datasets.

        :param n_test_per_dim: grid resolution per axis; the grid is fixed to
            [-3, 4] x [-2, 3].
        :param correction: if True, importance-weight the MC samples via the
            coreset posterior; otherwise average them uniformly.
        :return: class probabilities for every grid point.
        """
        _x0_test = torch.linspace(-3, 4, n_test_per_dim)
        _x1_test = torch.linspace(-2, 3, n_test_per_dim)
        x_test = torch.stack(torch.meshgrid(_x0_test, _x1_test), dim=-1).to(self.device)
        with torch.no_grad():
            # Forward coreset points and flattened grid together.
            all_data = torch.cat((self.u, x_test.view(-1, 2)))
            all_logits = self.model(all_data).squeeze(-1)
            pseudo_nll = (
                (
                    self.distr_fn(logits=all_logits[:, : self.num_pseudo])
                    .log_prob(self.z)
                    .matmul(self.N * self.f(self.v, 0))
                )
                if self.num_pseudo > 0
                else 0.0
            )
            grid_data_logits = all_logits[:, self.num_pseudo :]
            sampled_nkl = sum(
                m.sampled_nkl()
                for m in self.model.modules()
                if (
                    isinstance(m, VILinear) or isinstance(m, VILinearMultivariateNormal)
                )
            )
            # Self-normalized importance weights over MC samples.
            log_weights = -pseudo_nll + sampled_nkl
            weights = log_weights.softmax(0)
            grid_probs = (
                (
                    grid_data_logits.softmax(-1)
                    .mul(weights.unsqueeze(-1).unsqueeze(-1))
                    .sum(0)
                )
                if correction
                else grid_data_logits.softmax(-1).mean(0)
            )
        return grid_probs
    def prune_coreset(
        self,
        to_size,
        lr0v=1e-3,
        lr0net=1e-4,
    ):  # designed to work only for the fixed u methods
        r"""
        Prune coreset to a given smaller size.

        Keeps `to_size` points sampled (without replacement) proportionally to
        their current transformed weights, then resets the kept weight logits
        to zero (uniform under a softmax transform) and rebuilds the affected
        optimizers.
        """
        self.num_pseudo = to_size
        # Stochastic selection proportional to f(v); a deterministic top-k
        # alternative is noted inline.
        keep_v = torch.multinomial(self.f(self.v, 0), to_size, replacement=False)  # torch.topk(self.v, to_size)
        self.v = torch.zeros_like(self.v[keep_v]).clone().detach().requires_grad_(True)
        self.optim_v = torch.optim.Adam([self.v], lr0v)
        self.u = torch.index_select(self.u, 0, keep_v)
        self.z = torch.index_select(self.z, 0, keep_v)
        self.optim_net = torch.optim.Adam(list(self.model.parameters()), lr0net)
    def increment_coreset(
        self,
        to_size,
        lr0v=1e-3,
        lr0u=1e-3,
        lr0net=1e-4,
        variance=1.,  # variance for random initialization of coreset for new class
        new_class=2,
        increment_idx=1,
    ):
        r"""
        Increment coreset to a given larger size.

        New weights are set so each added point carries the current average
        weight mass; new locations/labels come either from a noisy empirical
        mean ("random" init) or from a random subsample of the incremental
        training split. Optimizers for v, u and the network are rebuilt.
        """
        self.num_pseudo, num_extra_points = to_size, to_size - len(self.v)
        extra_weights = torch.ones(num_extra_points, device=self.device)
        # Each new point gets sum(v) / new_total weight, then v is re-leafed.
        self.v = torch.cat(( self.v, 1. / (len(self.v) + num_extra_points) * self.v.sum() * extra_weights )).detach().requires_grad_(True)
        self.optim_v = torch.optim.Adam([self.v], lr0v)
        (new_us, new_zs) = (
            ((compute_empirical_mean(self.train_loader) + variance * torch.randn(num_extra_points, self.D)).clone(), new_class * torch.ones(num_extra_points))
            if self.init_args == "random"
            else self.incremental_train_datasets[increment_idx][torch.randperm(len(self.incremental_train_datasets[increment_idx]))[:num_extra_points]])
        self.u, self.z = torch.cat((self.u, new_us)).detach().requires_grad_(True), torch.cat((self.z, new_zs))
        self.optim_u = torch.optim.Adam([self.u], lr0u)
        self.optim_net = torch.optim.Adam(list(self.model.parameters()), lr0net)
class PSVILearnV(PSVI):
    r"""
    PSVI variant whose coreset weights live on a simplex: v is stored as free
    logits and mapped through a softmax, so the weights keep a constant sum.
    """

    def __init__(self, learn_v=True, parameterised=True, **kwargs):
        super().__init__(**kwargs)
        self.learn_v = learn_v
        self.parameterised = parameterised
        # Zero logits correspond to a uniform weighting over the pseudodata.
        with torch.no_grad():
            self.v = torch.zeros(self.num_pseudo, device=self.device)
        self.v.requires_grad_(True)
        # Map logits onto the simplex so total coreset mass stays fixed.
        self.f = torch.softmax
class PSVI_No_Rescaling(PSVI):
    r"""
    PSVI
    - with no fixed or learnable coefficients on coreset datapoints whatsoever
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Cancel the log-likelihood rescaling by the true dataset size N that
        # the base class bakes into the coreset weights.
        inverse_scale = 1.0 / self.N
        self.v *= inverse_scale
class PSVIFreeV(PSVI):
    r"""
    PSVI
    - with learnable v (subject only to non-negativity constraints)
    """

    def __init__(self, learn_v=True, **kwargs):
        super().__init__(**kwargs)
        self.v.requires_grad_(True)
        # v is always optimized in this variant, whatever learn_v was passed.
        self.learn_v = True
class PSVI_Ablated(PSVILearnV):
    r"""
    PSVI
    - with ablated importance sampling from coreset variational posterior
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def psvi_elbo(self, xbatch, ybatch, model=None, params=None, hyperopt=False):
        r"""
        Ablated PSVI objective computation: plain (negative) ELBO on the data
        batch with no importance-weight correction from the coreset posterior.
        """
        Nx = xbatch.shape[0]
        logits = model(xbatch) if not hyperopt else model(xbatch, params=params)
        nlls = -self.distr_fn(logits=logits.squeeze(-1)).log_prob(ybatch)
        # Rescale the minibatch NLL to the full dataset size N.
        data_nll = self.N / Nx * nlls.sum(-1)  # multi-sample training
        # NOTE(review): only VILinear modules contribute here, unlike sibling
        # methods that also count VILinearMultivariateNormal — confirm this
        # ablation is meant for VILinear-only models.
        sampled_nkl = sum(
            m.sampled_nkl() for m in model.modules() if isinstance(m, VILinear)
        )
        return data_nll.mean() - sampled_nkl.mean()
class PSVI_No_IW(PSVI_Ablated):
    r"""
    PSVI
    - with single-sample training / multi-sample testing
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.mc_samples = 1  # train with a single MC sample

    def evaluate(
        self,
        correction=True,
        mc_samples_eval=5,
        mc_samples_train=1,
        **kwargs,
    ):
        r"""
        Compute predictive metrics.

        Temporarily switches the model to multi-sample mode for evaluation,
        then restores single-sample mode for training.

        :param correction: forwarded to the parent evaluate to toggle the
            importance-weight correction.
        :return: (accuracy, NLL, importance-weight entropy, normalized ESS,
            coreset-weight statistic) as produced by the parent class.
        """
        with torch.no_grad():
            self.mc_samples = mc_samples_eval
            set_mc_samples(
                self.model, self.mc_samples
            )  # set to multi-sample for testing
            # Bug fix: forward the caller's `correction` flag instead of
            # hard-coding True, so the correction can actually be disabled.
            test_acc, test_nll, iw_entropy, ness, v_entropy = super().evaluate(
                correction=correction,
                **kwargs,
            )
            self.mc_samples = 1
            set_mc_samples(
                self.model, mc_samples_train
            )  # set to single-sample for training
        return test_acc, test_nll, iw_entropy, ness, v_entropy

    def pred_on_grid(
        self,
        correction=True,
        n_test_per_dim=250,
        mc_samples_eval=5,
        mc_samples_train=1,
        **kwargs,
    ):
        r"""
        Predictions over a 2-d grid for visualization of predictive posterior on 2-d synthetic datasets
        """
        # TODO: fix for correction via importance weighting
        with torch.no_grad():
            self.mc_samples = mc_samples_eval
            set_mc_samples(
                self.model, self.mc_samples
            )  # set to multi-sample for testing
            test_probs = super().pred_on_grid(
                correction=correction,
                n_test_per_dim=n_test_per_dim,
                **kwargs,
            )
            self.mc_samples = mc_samples_train
            set_mc_samples(
                self.model, mc_samples_train
            )  # set to single-sample for training
        return test_probs
class PSVIAV(PSVILearnV):
    r"""
    PSVI subclass with
    - learnable coreset point weights on a simplex,
    - learnable rescaling of total coreset evidence
    """
    def __init__(self, learn_v=True, **kwargs):
        super().__init__(**kwargs)
        # alpha is a log-scale factor on the total coreset mass.
        self.alpha = torch.tensor([0.0], device=self.device)
        self.alpha.requires_grad_(True)
        self.f = lambda *x: (
            torch.exp(self.alpha) * torch.softmax(x[0], x[1])
        )  # transform v via softmax to keep the sum over the pseudodata fixed and multiply by a learnable non-negative coefficient
        self.optim_alpha = torch.optim.Adam([self.alpha], self.lr0alpha)
        self.results["alpha"] = []
    def evaluate(self, **kwargs):
        r"""Record the current alpha, then delegate to the parent evaluate."""
        self.results["alpha"].append(
            self.alpha.clone()
            .cpu()
            .detach()
            .numpy()  # store the extra variational parameter
        )
        return super().evaluate(**kwargs)
    def increment_coreset(self, lr0alpha=1e-3, **kwargs):
        r"""Grow the coreset and rebuild the alpha optimizer."""
        super().increment_coreset(**kwargs)
        self.optim_alpha = torch.optim.Adam([self.alpha], lr0alpha)
    def hyper_step(
        self,
        xbatch,
        ybatch,
        T=10,  # iterations for inner problem solver
        inner_opt_class=DifferentiableAdam,  # optimizer type for inner problem solver
        K=10,  # iterations for linear system solver (in approximate implicit differentiation methods)
        linsys_lr=1e-1,  # lr for the SGD optimizer used to solve the linear system on the Jacobian-vector products
        hypergrad_approx="CG_normaleq",
        **kwargs,
    ):
        r"""
        One hyperparameter update of (u, v, alpha) via approximate implicit
        differentiation: solve the inner VI problem for T steps, approximate
        the hypergradient (fixed-point or CG-on-normal-equations), then step
        the hyperparameter optimizers and write the inner solution back into
        the model.
        """
        # The default T is overridden by the instance-level inner iteration count.
        T = self.inner_it
        inner_opt_kwargs = {"step_size": self.optim_net.param_groups[0]["lr"]}
        fmodel = monkeypatch(self.model, copy_initial_weights=True)
        self.optim_u.zero_grad()
        self.optim_v.zero_grad()
        self.optim_alpha.zero_grad()
        if self.optim_z:
            raise NotImplementedError
        def inner_loop(hparams, params, optim, n_steps, create_graph=False):
            # Unrolled inner optimization; the full parameter history is kept.
            params_history = [optim.get_opt_params(params)]
            for _ in range(n_steps):
                params_history.append(
                    optim(params_history[-1], hparams, create_graph=create_graph)
                )
            return params_history
        def get_inner_opt(train_loss):
            return inner_opt_class(train_loss, **inner_opt_kwargs)
        def inner_loss_function(p, hp, hyperopt=True):
            # Bind hyperparameters to the instance so inner_elbo reads them.
            self.u, self.v, self.alpha = hp[0], hp[1], hp[2]
            return self.inner_elbo(model=fmodel, params=p, hyperopt=hyperopt)
        def outer_loss_function(p, hp):
            self.u, self.v, self.alpha = hp[0], hp[1], hp[2]
            return self.psvi_elbo(xbatch, ybatch, model=fmodel, params=p, hyperopt=True)
        inner_opt = get_inner_opt(inner_loss_function)
        params = [p.detach().clone().requires_grad_(True) for p in fmodel.parameters()]
        params_history = inner_loop(
            [self.u] + [self.v] + [self.alpha],
            params,
            inner_opt,
            T,
        )
        last_param = params_history[-1][: len(params)]
        linear_opt = GradientDescent(
            loss_f=inner_loss_function, step_size=linsys_lr
        )  # GradientDescent(loss_f=inner_loss_function, step_size=linsys_lr)
        # Populate hypergradients on (u, v, alpha) via the chosen AID scheme.
        if hypergrad_approx == "fixed_point":  # fixed-point AID
            fixed_point(
                last_param,
                [self.u] + [self.v] + [self.alpha],
                K=K,
                fp_map=linear_opt,
                outer_loss=outer_loss_function,
                stochastic=True,
            )
        elif hypergrad_approx == "CG_normaleq":  # CG on normal equations AID
            CG_normaleq(
                last_param,
                [self.u] + [self.v] + [self.alpha],
                K=K,
                fp_map=linear_opt,
                outer_loss=outer_loss_function,
                set_grad=True,
            )
        self.optim_u.step()
        if self.learn_v:
            self.optim_v.step()
        self.optim_alpha.step()
        ll = outer_loss_function(last_param, [self.u] + [self.v] + [self.alpha])
        # Copy the inner-loop solution back into the persistent model.
        nn.utils.vector_to_parameters(
            nn.utils.parameters_to_vector(last_param),
            self.model.parameters(),
        )
        return ll.item()
    def nested_step(self, xbatch, ybatch):
        r"""
        One nested optimization step: differentiable inner VI loop, then an
        outer PSVI-ELBO step on (u, v, alpha[, z]) and network parameters.
        """
        self.optim_u.zero_grad()
        self.optim_net.zero_grad()
        self.optim_alpha.zero_grad()
        if self.learn_v:
            self.optim_v.zero_grad()
        if self.learn_z:
            self.optim_z.zero_grad()
        with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
            for in_it in range(self.inner_it):
                mfvi_loss = self.inner_elbo(model=fmodel)
                with torch.no_grad():
                    if self.register_elbos and in_it % self.log_every == 0:
                        self.elbos.append((1, -mfvi_loss.item()))
                diffopt.step(mfvi_loss)
            psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
            with torch.no_grad():
                if self.register_elbos:
                    self.elbos.append((0, -psvi_loss.item()))
            psvi_loss.backward()
        self.optim_u.step()
        if self.learn_v:
            self.optim_v.step()
        self.optim_alpha.step()
        if self.learn_z:
            self.optim_z.step()
        if self.scheduler_optim_net:
            self.scheduler_optim_net.step()
        # Persist the functional model's parameters into the real model.
        nn.utils.vector_to_parameters(
            nn.utils.parameters_to_vector(list(fmodel.parameters())),
            self.model.parameters(),
        )
        return psvi_loss
class PSVIFixedU(PSVILearnV):
    r"""
    PSVI subclass
    - with fixed coreset point locations
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def nested_step(self, xbatch, ybatch):
        r"""
        One nested optimization step with u frozen: inner VI loop, then an
        outer step on v (if learnable) and the network parameters.
        """
        self.u.requires_grad_(False)  # coreset locations stay fixed
        self.optim_net.zero_grad()
        if self.learn_v:
            self.optim_v.zero_grad()
        with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
            for in_it in range(self.inner_it):
                mfvi_loss = self.inner_elbo(model=fmodel)
                with torch.no_grad():
                    if self.register_elbos and in_it % self.log_every == 0:
                        self.elbos.append((1, -mfvi_loss.item()))
                diffopt.step(mfvi_loss)
            psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
            with torch.no_grad():
                if self.register_elbos:
                    self.elbos.append((0, -psvi_loss.item()))
            psvi_loss.backward()
        if self.learn_v:
            self.optim_v.step()
        if self.scheduler_optim_net:
            self.scheduler_optim_net.step()
        # Persist the functional model's parameters into the real model.
        nn.utils.vector_to_parameters(
            nn.utils.parameters_to_vector(list(fmodel.parameters())),
            self.model.parameters(),
        )
        return psvi_loss
    def hyper_step(
        self,
        xbatch,
        ybatch,
        T=20,  # iterations for inner problem solver
        inner_opt_class=DifferentiableAdam,  # optimizer type for inner problem solver
        K=20,  # iterations for linear system solver (in approximate implicit differentiation methods)
        linsys_lr=1e-3,  # lr for the SGD optimizer used to solve the linear system on the Jacobian-vector products
        hypergrad_approx="CG_normaleq",
        **kwargs,
    ):
        r"""
        One hyperparameter update of v only (u stays fixed) via approximate
        implicit differentiation, mirroring PSVIAV.hyper_step.
        """
        self.u.requires_grad_(False)
        # The default T is overridden by the instance-level inner iteration count.
        T = self.inner_it
        inner_opt_kwargs = {"step_size": self.optim_net.param_groups[0]["lr"]}
        fmodel = monkeypatch(self.model, copy_initial_weights=True)
        if self.learn_v:
            self.optim_v.zero_grad()
        def inner_loop(hparams, params, optim, n_steps, create_graph=False):
            # Unrolled inner optimization; the full parameter history is kept.
            params_history = [optim.get_opt_params(params)]
            for _ in range(n_steps):
                params_history.append(
                    optim(params_history[-1], hparams, create_graph=create_graph)
                )
            return params_history
        def get_inner_opt(train_loss):
            return inner_opt_class(train_loss, **inner_opt_kwargs)
        def inner_loss_function(p, hp, hyperopt=True):
            # Bind the hyperparameter v to the instance so inner_elbo reads it.
            if self.learn_v:
                self.v = hp[0]
            else:
                pass
            return self.inner_elbo(model=fmodel, params=p, hyperopt=hyperopt)
        def outer_loss_function(p, hp):
            if self.learn_v:
                self.v = hp[0]
            else:
                pass
            return self.psvi_elbo(xbatch, ybatch, model=fmodel, params=p, hyperopt=True)
        inner_opt = get_inner_opt(inner_loss_function)
        params = [p.detach().clone().requires_grad_(True) for p in fmodel.parameters()]
        params_history = inner_loop(
            [self.v] if self.learn_v else None,
            params,
            inner_opt,
            T,
        )
        last_param = params_history[-1][: len(params)]
        # NOTE(review): unlike the sibling classes, this uses a differentiable
        # Adam as the fixed-point map for the linear solve.
        fp_map = DifferentiableAdam(
            loss_f=inner_loss_function, step_size=linsys_lr
        )  # GradientDescent(loss_f=inner_loss_function, step_size=linsys_lr)
        if hypergrad_approx == "fixed_point":  # fixed-point AID
            fixed_point(
                last_param,
                [self.v] if self.learn_v else None,
                K=K,
                fp_map=fp_map,
                outer_loss=outer_loss_function,
                stochastic=True,
            )
        elif hypergrad_approx == "CG_normaleq":  # CG on normal equations AID
            CG_normaleq(
                last_param,
                [self.v] if self.learn_v else None,
                K=K,
                fp_map=fp_map,
                outer_loss=outer_loss_function,
                set_grad=True,
            )
        if self.learn_v:
            self.optim_v.step()
        ll = outer_loss_function(last_param, [self.v])
        if self.scheduler_optim_net:
            self.scheduler_optim_net.step()
        # Copy the inner-loop solution back into the persistent model.
        nn.utils.vector_to_parameters(
            nn.utils.parameters_to_vector(last_param),
            self.model.parameters(),
        )
        return ll.item()
class PSVIAFixedU(PSVILearnV):
    r"""
    PSVI subclass with
    - fixed coreset point locations
    - learnable coreset weights on a simplex
    - learnable rescaling of total coreset evidence
    """
    def __init__(self, learn_v=True, **kwargs):
        super().__init__(**kwargs)
        # alpha is a log-scale factor on the total coreset mass.
        self.alpha = torch.tensor([0.0], device=self.device)
        self.alpha.requires_grad_(True)
        self.f = lambda *x: (
            torch.exp(self.alpha) * torch.softmax(x[0], x[1])
        )  # transform v via softmax to keep the sum over the pseudodata fixed and multiply by a learnable non-negative coefficient
        self.optim_alpha = torch.optim.Adam([self.alpha], self.lr0alpha)
        self.results["alpha"] = []
    def evaluate(self, **kwargs):
        r"""Record the current alpha, then delegate to the parent evaluate."""
        self.results["alpha"].append(
            self.alpha.clone()
            .cpu()
            .detach()
            .numpy()  # store the extra variational parameter
        )
        return super().evaluate(**kwargs)
    def hyper_step(
        self,
        xbatch,
        ybatch,
        T=5,  # iterations for inner problem solver
        inner_opt_class=DifferentiableAdam,  # optimizer type for inner problem solver
        K=5,  # iterations for linear system solver (in approximate implicit differentiation methods)
        linsys_lr=1e-1,  # lr for the SGD optimizer used to solve the linear system on the Jacobian-vector products
        hypergrad_approx="CG_normaleq",
        **kwargs,
    ):
        r"""
        One hyperparameter update of (v, alpha) with u frozen, via approximate
        implicit differentiation, mirroring PSVIAV.hyper_step.
        """
        # The default T is overridden by the instance-level inner iteration count.
        T = self.inner_it
        inner_opt_kwargs = {"step_size": self.optim_net.param_groups[0]["lr"]}
        fmodel = monkeypatch(self.model, copy_initial_weights=True)
        self.optim_v.zero_grad()
        self.optim_alpha.zero_grad()
        self.u.requires_grad_(False)  # coreset locations stay fixed
        def inner_loop(hparams, params, optim, n_steps, create_graph=False):
            # Unrolled inner optimization; the full parameter history is kept.
            params_history = [optim.get_opt_params(params)]
            for _ in range(n_steps):
                params_history.append(
                    optim(params_history[-1], hparams, create_graph=create_graph)
                )
            return params_history
        def get_inner_opt(train_loss):
            return inner_opt_class(train_loss, **inner_opt_kwargs)
        def inner_loss_function(p, hp, hyperopt=True):
            # Bind hyperparameters to the instance so inner_elbo reads them.
            self.v, self.alpha = hp[0], hp[1]
            return self.inner_elbo(model=fmodel, params=p, hyperopt=hyperopt)
        def outer_loss_function(p, hp):
            self.v, self.alpha = hp[0], hp[1]
            return self.psvi_elbo(xbatch, ybatch, model=fmodel, params=p, hyperopt=True)
        inner_opt = get_inner_opt(inner_loss_function)
        params = [p.detach().clone().requires_grad_(True) for p in fmodel.parameters()]
        params_history = inner_loop(
            [self.v] + [self.alpha],
            params,
            inner_opt,
            T,
        )
        last_param = params_history[-1][: len(params)]
        linear_opt = GradientDescent(loss_f=inner_loss_function, step_size=linsys_lr)
        # Populate hypergradients on (v, alpha) via the chosen AID scheme.
        if hypergrad_approx == "fixed_point":  # fixed-point AID
            fixed_point(
                last_param,
                [self.v] + [self.alpha],
                K=K,
                fp_map=linear_opt,
                outer_loss=outer_loss_function,
                stochastic=True,
            )
        elif hypergrad_approx == "CG_normaleq":  # CG on normal equations AID
            CG_normaleq(
                last_param,
                [self.v] + [self.alpha],
                K=K,
                fp_map=linear_opt,
                outer_loss=outer_loss_function,
                set_grad=True,
            )
        if self.learn_v:
            self.optim_v.step()
        self.optim_alpha.step()
        ll = outer_loss_function(last_param, [self.v] + [self.alpha])
        # Copy the inner-loop solution back into the persistent model.
        nn.utils.vector_to_parameters(
            nn.utils.parameters_to_vector(last_param),
            self.model.parameters(),
        )
        return ll.item()
    def nested_step(self, xbatch, ybatch):
        r"""
        One nested optimization step with u frozen: inner VI loop, then an
        outer step on v (if learnable), alpha and the network parameters.
        """
        self.optim_net.zero_grad()
        self.optim_alpha.zero_grad()
        if self.learn_v:
            self.optim_v.zero_grad()
        self.u.requires_grad_(False)
        with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
            for in_it in range(self.inner_it):
                mfvi_loss = self.inner_elbo(model=fmodel)
                with torch.no_grad():
                    if self.register_elbos and in_it % self.log_every == 0:
                        self.elbos.append((1, -mfvi_loss.item()))
                diffopt.step(mfvi_loss)
            psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
            with torch.no_grad():
                if self.register_elbos:
                    self.elbos.append((0, -psvi_loss.item()))
            psvi_loss.backward()
        if self.learn_v:
            self.optim_v.step()
        self.optim_alpha.step()
        if self.scheduler_optim_net:
            self.scheduler_optim_net.step()
        # Persist the functional model's parameters into the real model.
        nn.utils.vector_to_parameters(
            nn.utils.parameters_to_vector(list(fmodel.parameters())),
            self.model.parameters(),
        )
        return psvi_loss
## PSVI subclass supporting regression
class PSVI_regressor(PSVI):
    r"""
    PSVI variant for regression: Gaussian likelihood with fixed precision tau,
    RMSE/log-likelihood evaluation, and de-normalization of predictions via
    (y_mean, y_std).
    """
    def __init__(
        self,
        u=None,  # pseudo x-coordinates
        z=None,  # pseudo y-coordinates
        train_dataset=None,  # true training data
        val_dataset=None,
        test_dataset=None,  # test data
        y_mean=None,
        y_std=None,
        N=None,  # size of training data
        D=None,  # dimensionality of training data
        optim=None,  # joint variational model/pseudodata optimizer
        optim_u=None,  # optimizer for pseudodata
        optim_net=None,  # optimizer for variational model parameters
        optim_v=None,  # optimizer for log-likelihood rescaling vector
        optim_z=None,  # optimizer for outputs on distilled data
        register_elbos=False,  # register values of objectives over inference
        num_pseudo=None,  # number of pseudodata
        seed=0,  # random seed for instantiation of the method (for reproducibility)
        compute_weights_entropy=True,  # compute the entropy of weights distribution used in importance sampling
        mc_samples=None,  # number of MC samples for computation of variational objectives and predictions on unseen data
        learn_v=False,  # boolean indicating if the v vector is learnable
        f=lambda *x: x[0],  # transformation applied on the v vector
        dnm=None,  # dataset name
        nc=1,  # dimension of output space
        init_dataset=None,  # populated when picking initializations from a disturbed version of the original datapoints
        parameterised=False,
        learn_z=True,  # optimize in the label space
        lr0alpha=1e-3,
        tau=0.1,
        logistic_regression=False,
        **kwargs,
    ):
        # NOTE(review): super().__init__() is never called, so any attribute the
        # PSVI base class sets (e.g. scheduler_optim_net) is NOT available on
        # this class unless set below — confirm before using base-class methods.
        np.random.seed(seed), torch.manual_seed(seed)
        print(f'device id {device_id} ')
        self.device = torch.device( f"cuda:{device_id}" if device_id else ("cuda" if torch.cuda.is_available() else "cpu"))
        self.u, self.z = u, z
        self.train_dataset, self.val_dataset, self.test_dataset = (
            train_dataset,
            val_dataset,
            test_dataset,
        )
        self.logistic_regression = logistic_regression
        self.N, self.D, self.dnm = N, D, dnm
        self.nc = nc  # dimensionality of output
        # Gaussian likelihood with standard deviation 1/sqrt(tau).
        self.distr_fn = partial(gaussian_fn, scale=1.0 / np.sqrt(tau))
        (self.optim, self.optim_u, self.optim_net, self.optim_v, self.optim_z,) = (
            optim,
            optim_u,
            optim_net,
            optim_v,
            optim_z,
        )
        self.register_elbos, self.compute_weights_entropy = (
            register_elbos,
            compute_weights_entropy,
        )
        if self.register_elbos:
            self.elbos = []
        self.num_pseudo, self.mc_samples = num_pseudo, mc_samples
        self.learn_v, self.learn_z = (
            learn_v,
            learn_z,
        )
        with torch.no_grad():
            self.v = (
                1.0 / self.num_pseudo * torch.ones(self.num_pseudo, device=self.device)
            )
        self.v.requires_grad_(
            self.learn_v
        )  # initialize weights of coreset pseudodata to uniform and set to differentiable or not according to attribute learn_v
        self.f, self.parameterised = f, parameterised
        self.init_dataset = init_dataset
        self.results = {}
        self.lr0alpha = lr0alpha
        self.y_mean, self.y_std = y_mean, y_std
    ### Initialization methods for the pseudodata
    def pseudo_subsample_init(self):
        r"""Initialize (u, z) as a random subsample of the training dataset."""
        sample_idcs = random.sample(range(len(self.train_dataset)), self.num_pseudo)
        subset_train_dataset = torch.utils.data.Subset(self.train_dataset, sample_idcs)
        self.cs_support = DataLoader(
            subset_train_dataset,
            batch_size=self.num_pseudo,
            # pin_memory=True,
            shuffle=False,
        )
        with torch.no_grad():
            self.u, self.z = next(iter(self.cs_support))
            self.u, self.z = self.u.to(self.device), self.z.to(self.device)
        self.u.requires_grad_(True), self.z.requires_grad_(True)
    ## PSVI objective computation [negative PSVI-ELBO]
    def psvi_elbo(self, xbatch, ybatch, model=None, params=None, hyperopt=False):
        r"""
        Outer PSVI objective on the true-data batch, importance-weighted by the
        coreset variational posterior samples.
        """
        assert self.mc_samples > 1  # importance weighting requires multiple MC samples
        Nu, Nx = self.u.shape[0], xbatch.shape[0]
        all_xs, all_ys = torch.cat((self.u, xbatch)), torch.cat((self.z, ybatch))
        all_nlls = -self.distr_fn(model(all_xs).squeeze(-1)).log_prob(all_ys.squeeze())
        pseudo_nll = (
            all_nlls[:, :Nu].matmul(self.N * self.f(self.v, 0)) if Nu > 0 else 0.0
        )
        # Minibatch NLL rescaled to the full dataset size.
        data_nll = self.N / Nx * all_nlls[:, Nu:].sum(-1)
        sampled_nkl = sum(
            m.sampled_nkl() for m in model.modules() if isinstance(m, VILinear)
        )
        log_weights = -pseudo_nll + sampled_nkl
        weights = log_weights.softmax(0)
        return weights.mul(data_nll - pseudo_nll).sum() - log_weights.mean()
    ## Inner VI objective computation [negative ELBO]
    def inner_elbo(self, model=None, params=None, hyperopt=False):
        r"""Negative ELBO on the coreset points only."""
        pseudodata_nll = (
            -self.distr_fn(model(self.u).squeeze(-1)).log_prob(self.z.squeeze())
        ).matmul(self.N * self.f(self.v, 0))
        kl = sum(m.kl() for m in model.modules() if isinstance(m, VILinear))
        return pseudodata_nll.sum() + kl if self.u.shape[0] > 0 else kl
    ## Optimization methods
    def nested_step(self, xbatch, ybatch):
        r"""
        One nested optimization step: differentiable inner VI loop on the
        coreset, then an outer PSVI-ELBO step on (u, v, z) and the network.
        """
        self.optim_u.zero_grad()
        self.optim_net.zero_grad()
        if self.learn_v:
            self.optim_v.zero_grad()
        if self.learn_z:
            self.optim_z.zero_grad()
        with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
            for in_it in range(self.inner_it):
                mfvi_loss = self.inner_elbo(model=fmodel)
                with torch.no_grad():
                    if self.register_elbos and in_it % self.log_every == 0:
                        self.elbos.append((1, -mfvi_loss.item()))
                diffopt.step(mfvi_loss)
            psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
            with torch.no_grad():
                if self.register_elbos:
                    self.elbos.append((0, -psvi_loss.item()))
            psvi_loss.backward()
        self.optim_u.step()
        if self.learn_v:
            self.optim_v.step()
            if not self.parameterised:
                with torch.no_grad():
                    torch.clamp_(
                        self.v, min=0.0
                    )  # clamp weights of coreset data point to be non-negative
        if self.learn_z:
            self.optim_z.step()
        # Persist the functional model's parameters into the real model.
        nn.utils.vector_to_parameters(
            nn.utils.parameters_to_vector(list(fmodel.parameters())),
            self.model.parameters(),
        )
        return psvi_loss
    ## Execution of inference
    def run_psvi(
        self,
        init_args="subsample",
        trainer="nested",
        n_layers=1,
        n_hidden=None,
        architecture=None,
        log_every=10,
        inner_it=10,
        data_minibatch=None,
        lr0net=1e-3,
        lr0u=1e-3,
        lr0v=1e-2,
        lr0z=1e-2,
        init_sd=1e-3,
        num_epochs=1000,
        log_pseudodata=False,
        **kwargs,
    ):
        r"""
        Full inference loop for the regressor: set up model/data/optimizers,
        alternate evaluation and optimization steps, and collect results.

        :return: dict of per-logging-step metrics (rmses, lls, coreset sizes,
            wall-clock times, weights, and optionally pseudodata trajectories).
        """
        # experiment-specific hyperparameters
        self.init_args = init_args
        self.trainer = trainer
        self.architecture, self.n_hidden, self.n_layers, self.init_sd = (
            architecture,
            n_hidden,
            n_layers,
            init_sd,
        )
        self.log_every, self.log_pseudodata = log_every, log_pseudodata
        self.data_minibatch = data_minibatch
        self.inner_it, self.num_epochs = inner_it, num_epochs
        self.set_up_model()
        # initialization of results data structures
        (
            lls_psvi,
            rmses_psvi,
            core_idcs_psvi,
            iws_entropy,
            nesses,
            vs_entropy,
            us,
            zs,
            vs,
            times,
        ) = ([], [], [], [], [], [], [], [], [], [0])
        # load the training and test data on dataloaders
        self.train_loader = DataLoader(
            self.train_dataset,
            batch_size=self.data_minibatch,
            pin_memory=True,
            shuffle=True,
        )
        self.val_loader = DataLoader(
            self.val_dataset,
            batch_size=self.data_minibatch,
            pin_memory=True,
            shuffle=False,
        )
        self.test_loader = DataLoader(
            self.test_dataset,
            batch_size=self.data_minibatch,
            pin_memory=True,
            shuffle=False,
        )
        # initialization of pseudodata
        pseudodata_init = {
            "subsample": self.pseudo_subsample_init,
        }
        pseudodata_init[self.init_args]()
        # optimization method
        self.optim_net, self.optim_u = (
            torch.optim.Adam(list(self.model.parameters()), lr0net),
            torch.optim.Adam([self.u], lr0u),
        )
        if self.learn_v:
            self.optim_v = torch.optim.Adam([self.v], lr0v)
        if self.learn_z:
            self.optim_z = torch.optim.Adam([self.z], lr0z)
        optimizers = {
            "nested": self.nested_step,
        }
        psvi_step = optimizers[self.trainer]
        t_start = time.time()
        # training loop
        for it in tqdm(range(self.num_epochs)):
            # NOTE(review): re-creating the loader iterator each epoch yields a
            # single (shuffled) minibatch per epoch, not a full pass.
            xbatch, ybatch = next(iter(self.train_loader))
            xbatch, ybatch = xbatch.to(self.device, non_blocking=True), ybatch.to(
                self.device, non_blocking=True
            )
            # evaluation
            if it % self.log_every == 0:
                test_rmse, test_ll = self.evaluate(**kwargs)
                with torch.no_grad():
                    lls_psvi.append(test_ll.item())
                    rmses_psvi.append(test_rmse.item())
                    core_idcs_psvi.append(self.num_pseudo)
                    times.append(times[-1] + time.time() - t_start)
                    vs.append((self.f(self.v, 0)).clone().cpu().detach().numpy())
                    if self.log_pseudodata:
                        us.append(self.u.clone().cpu().detach().numpy())
                        zs.append(self.z.clone().cpu().detach().numpy())
            # take a single optimization step
            outer_loss = psvi_step(xbatch, ybatch)
            if it % self.log_every == 0:
                print(
                    f" \n\n\n Predictive rmse {test_rmse.item():.2f} | pred ll {test_ll.item():.2f}| outer loss {outer_loss:.0f}"
                )
        # store results
        self.results["rmses"] = rmses_psvi
        self.results["lls"] = lls_psvi
        self.results["csizes"] = core_idcs_psvi
        self.results["times"] = times[1:]
        self.results["went"] = iws_entropy
        self.results["ness"] = nesses
        self.results["vent"] = vs_entropy
        self.results["vs"] = vs
        print("rmses : ", ["%.4f" % el for el in self.results["rmses"]])
        print("lls : ", ["%.4f" % el for el in self.results["lls"]])
        return self.results
    ## Compute predictive metrics
    def evaluate(
        self,
        correction=True,
        **kwargs,
    ):
        r"""
        Compute predictive RMSE and mean log-likelihood on the test set.

        :return: tuple (rmse, mean test log-likelihood).
        """
        def revert_norm(y_pred):
            # Map normalized predictions back to the original target scale.
            return y_pred * self.y_std + self.y_mean
        assert self.mc_samples > 1  # importance weighting requires multiple MC samples
        total, test_ll, rmses_unnorm = 0, 0, 0
        for xt, yt in self.test_loader:
            xt, yt = (
                xt.to(self.device, non_blocking=True),
                yt.to(self.device, non_blocking=True).squeeze(),
            )
            with torch.no_grad():
                all_data = torch.cat((self.u, xt)).squeeze(-1)
                model_out = self.model(all_data).squeeze(-1)
                pseudo_out = model_out[:, : self.num_pseudo]
                # NOTE(review): when num_pseudo == 0, `0.0.sum()` would raise
                # AttributeError; also this is a log-likelihood (no negation),
                # so `-pseudo_ll` below differs in sign from psvi_elbo's
                # `-pseudo_nll` — confirm both against the intended math.
                pseudo_ll = (
                    self.distr_fn(pseudo_out)
                    .log_prob(self.z.squeeze())
                    .mul(self.N * self.f(self.v, 0))
                    if self.num_pseudo > 0
                    else 0.0
                ).sum()
                test_data_out = model_out[:, self.num_pseudo :]
                sampled_nkl = sum(
                    m.sampled_nkl()
                    for m in self.model.modules()
                    if isinstance(m, VILinear)
                )
                log_weights = -pseudo_ll + sampled_nkl
                weights = log_weights.softmax(0)
                # Importance-weighted average of the de-normalized predictions.
                y_pred = torch.matmul(revert_norm(test_data_out).T, weights)
                rmses_unnorm += (y_pred - yt).square().sum()
                total += yt.size(0)
                test_ll += self.distr_fn(y_pred).log_prob(yt.squeeze()).sum()
        return (
            (rmses_unnorm / float(total)).sqrt(),
            test_ll / float(total),
        )
## PSVI with learnable v on a simplex (with constant sum constraint)
class PSVILearnV_regressor(PSVI_regressor):
    r"""
    Regression PSVI whose coreset weights are kept on a probability simplex:
    v is stored as free logits and pushed through a softmax.
    """

    def __init__(self, learn_v=True, parameterised=True, **kwargs):
        super().__init__(**kwargs)
        self.learn_v = learn_v
        self.parameterised = parameterised
        # Zero logits map to uniform weights under the softmax below.
        with torch.no_grad():
            self.v = torch.zeros(self.num_pseudo, device=self.device)
        self.v.requires_grad_(True)
        # Constrain v to the simplex so the total coreset mass stays constant.
        self.f = torch.softmax
## PSVI with learnable v on a simplex and learnable rescaling on total coreset likelihood
class PSVIAV_regressor(PSVILearnV_regressor):
    r"""
    Regression PSVI with simplex-constrained coreset weights and a learnable
    log-scale factor alpha on the total coreset evidence.
    """
    def __init__(self, learn_v=True, **kwargs):
        super().__init__(**kwargs)
        # alpha is a log-scale factor on the total coreset mass.
        self.alpha = torch.tensor([0.0], device=self.device)
        self.alpha.requires_grad_(True)
        self.f = lambda *x: (
            torch.exp(self.alpha) * torch.softmax(x[0], x[1])
        )  # transform v via softmax to keep the sum over the pseudodata fixed and multiply by a learnable non-negative coefficient
        self.optim_alpha = torch.optim.Adam([self.alpha], self.lr0alpha)
        self.results["alpha"] = []
    def evaluate(self, **kwargs):
        r"""Record the current alpha, then delegate to the parent evaluate."""
        self.results["alpha"].append(
            self.alpha.clone()
            .cpu()
            .detach()
            .numpy()  # store the extra variational parameter
        )
        return super().evaluate(**kwargs)
    def nested_step(self, xbatch, ybatch):
        r"""
        One nested optimization step: inner VI loop, then an outer step on
        (u, v, alpha, z) and the network parameters.
        """
        self.optim_u.zero_grad()
        self.optim_net.zero_grad()
        self.optim_alpha.zero_grad()
        if self.learn_v:
            self.optim_v.zero_grad()
        if self.learn_z:
            self.optim_z.zero_grad()
        with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
            for in_it in range(self.inner_it):
                mfvi_loss = self.inner_elbo(model=fmodel)
                with torch.no_grad():
                    if self.register_elbos and in_it % self.log_every == 0:
                        self.elbos.append((1, -mfvi_loss.item()))
                diffopt.step(mfvi_loss)
            psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
            with torch.no_grad():
                if self.register_elbos:
                    self.elbos.append((0, -psvi_loss.item()))
            psvi_loss.backward()
        self.optim_u.step()
        if self.learn_v:
            self.optim_v.step()
        self.optim_alpha.step()
        if self.learn_z:
            self.optim_z.step()
        # NOTE(review): PSVI_regressor.__init__ never sets scheduler_optim_net
        # (and does not call super().__init__), so this attribute access may
        # raise AttributeError unless set elsewhere — verify before use.
        if self.scheduler_optim_net:
            self.scheduler_optim_net.step()
        # Persist the functional model's parameters into the real model.
        nn.utils.vector_to_parameters(
            nn.utils.parameters_to_vector(list(fmodel.parameters())),
            self.model.parameters(),
        )
        return psvi_loss
|
Blackbox-Coresets-VI-main
|
psvi/inference/psvi_classes.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.distributions as dist
from psvi.models.neural_net import VILinear
from torch.utils.data import DataLoader
def pseudo_subsample_init(x, y, num_pseudo=20, nc=2, seed=0):
    r"""
    Initialize pseudodata by drawing a random subset of the observed points
    from each class, splitting num_pseudo approximately equally across the
    nc classes (the last class absorbs the remainder).

    :param x: observed inputs, shape (N, D)
    :param y: class labels for x
    :return: (u, z) with u differentiable pseudo-inputs and z their labels
    """
    torch.manual_seed(seed)
    n_obs = x.shape[0]
    per_class = num_pseudo // nc
    u_parts, z_parts = [], []
    for c in range(nc):
        class_idx = torch.arange(n_obs)[y == c]
        # The last class takes whatever is left so totals are exact.
        take = per_class if c < nc - 1 else num_pseudo - per_class * (nc - 1)
        chosen = class_idx[torch.randperm(len(class_idx))[:take]]
        u_parts.append(x[chosen])
        z_parts.append(c * torch.ones(take))
    u = torch.cat([torch.Tensor([])] + u_parts)
    z = torch.cat([torch.Tensor([])] + z_parts)
    return u.requires_grad_(True), z
def pseudo_rand_init(x, y, num_pseudo=20, nc=2, seed=0, variance=0.1):
    r"""
    Initialize pseudo-inputs at the global mean of the observed inputs plus
    Gaussian noise, with labels split (approximately) equally among classes.

    :param x: observed inputs, shape (N, D)
    :param variance: scale of the Gaussian perturbation around the mean
    :return: (u, z) with u differentiable pseudo-inputs and z their labels
    """
    torch.manual_seed(seed)
    dim = x.shape[1]
    # Noisy copies of the overall (scalar) data mean, broadcast over all dims.
    u = (
        (x.mean() + variance * torch.randn(num_pseudo, dim))
        .clone()
        .requires_grad_(True)
    )
    per_class = num_pseudo // nc
    label_chunks = []
    for c in range(nc):
        # The last class absorbs the remainder so the totals are exact.
        count = per_class if c < nc - 1 else num_pseudo - (nc - 1) * per_class
        label_chunks.append(c * torch.ones(count))
    z = torch.cat([torch.Tensor([])] + label_chunks)
    return u, z
r"""
Model specific computations for psvi variational objective used to estimate the coreset posterior over black-box sparsevi construction
"""
def elbo(net, u, z, w):
    r"""
    ELBO computed on (u, z): variational objective for posterior approximation
    using only the coreset datapoints, with per-point weights w.
    """
    weighted_nll = -dist.Bernoulli(logits=net(u).squeeze(-1)).log_prob(z).matmul(w)
    nkl = sum(m.sampled_nkl() for m in net.modules() if isinstance(m, VILinear))
    return (weighted_nll.sum() - nkl).sum()
def sparsevi_psvi_elbo(net, x, u, y, z, w, N):  # variational objective for
    r"""
    PSVI-ELBO: variational objective for true data conditioned on coreset data
    (called in outer optimization of the sparse-bbvi construction)
    """
    n_core = u.shape[0]
    n_batch = x.shape[0]
    inputs = torch.cat((u, x))
    targets = torch.cat((z, y))
    nlls = -dist.Bernoulli(logits=net(inputs).squeeze(-1)).log_prob(targets)
    # Coreset NLL rescaled by N / n_core and weighted by w; batch NLL summed.
    core_nll = N / n_core * nlls[:, :n_core].matmul(w)
    batch_nll = nlls[:, n_core:].sum(-1)
    nkl = sum(m.sampled_nkl() for m in net.modules() if isinstance(m, VILinear))
    # Self-normalized importance weights over the MC samples.
    log_w = -core_nll + nkl
    iw = log_w.softmax(-1).squeeze()
    return iw.mul(N / n_batch * batch_nll - core_nll).sum() - log_w.mean()
def forward_through_coreset(net, u, x, z, y, w):
    r"""
    Likelihood computations for coreset next datapoint selection step

    Args:
        net: variational network containing ``VILinear`` layers.
        u, z: current coreset pseudo-inputs and labels.
        x, y: minibatch of observed inputs and labels.
        w: coreset weight vector.

    Returns:
        Tuple ``(core_ll.T, data_ll.T, weights)``: log-likelihood matrices for
        the coreset and data points (points x MC samples) and the
        self-normalized importance weights of the MC samples.
    """
    Nu = u.shape[0]
    with torch.no_grad():
        # Evaluate coreset and data points in a single forward pass.
        all_data, all_labels = torch.cat((u, x)), torch.cat((z, y))
        all_lls = dist.Bernoulli(logits=net(all_data).squeeze(-1)).log_prob(all_labels)
        core_ll, data_ll = all_lls[:, :Nu], all_lls[:, Nu:]
        sampled_nkl = sum(
            m.sampled_nkl() for m in net.modules() if isinstance(m, VILinear)
        )
        # Importance weights of the MC samples under the coreset posterior.
        log_weights = core_ll.matmul(w) + sampled_nkl
        weights = log_weights.softmax(-1).squeeze()
        return core_ll.T, data_ll.T, weights
def predict_through_coreset(net, xt, x, y, w=None):
    r"""
    Importance-weight correction for predictions using the coreset posterior

    Args:
        net: variational network containing ``VILinear`` layers.
        xt: test inputs to predict on.
        x, y: conditioning inputs and labels.
        w: optional weights for the conditioning points; when None every point
            contributes with unit weight.

    Returns:
        Tuple ``(test_data_logits, weights)``: per-MC-sample logits on ``xt``
        and the self-normalized importance weights of the MC samples.
    """
    Ntest = xt.shape[0]
    with torch.no_grad():
        # Forward test and conditioning points together.
        all_data = torch.cat((xt, x))
        all_logits = net(all_data).squeeze(-1)
        pnlls = -dist.Bernoulli(logits=all_logits[:, Ntest:]).log_prob(y)
        pseudo_nll = pnlls.matmul(w) if w is not None else pnlls.sum(-1)
        test_data_logits = all_logits[:, :Ntest]
        sampled_nkl = sum(
            m.sampled_nkl() for m in net.modules() if isinstance(m, VILinear)
        )
        # Self-normalized importance weights over the MC samples.
        log_weights = -pseudo_nll + sampled_nkl
        weights = log_weights.softmax(-1).squeeze()
        return test_data_logits, weights
def make_dataloader(data, minibatch, shuffle=True):
    r"""
    Wrap ``data`` in a pytorch ``DataLoader`` yielding pinned-memory batches of
    size ``minibatch`` (shuffled by default).
    """
    return DataLoader(data, batch_size=minibatch, shuffle=shuffle, pin_memory=True)
def compute_empirical_mean(dloader):
    r"""
    Compute the mean of the observed data distribution by streaming over the
    dataloader and averaging per-channel means across all samples.
    """
    running_total, seen = 0., 0.
    for batch, _ in dloader:
        n = batch.size(0)
        # Flatten trailing dims so the mean is taken per channel.
        flat = batch.view(n, batch.size(1), -1)
        # use with caution: might raise overflow for large datasets
        running_total += flat.mean(2).sum(0)
        seen += n
    return running_total / seen
def pred_on_grid(
    model,
    n_test_per_dim=250,
    device=None,
    **kwargs,
):
    r"""
    Predictions over a 2-d grid for visualization of the predictive posterior
    on 2-d synthetic datasets.

    Args:
        model: callable mapping a ``(M, 2)`` tensor of grid points to logits.
        n_test_per_dim: grid resolution along each axis.
        device: optional device the grid is moved to before evaluation.
        **kwargs: ignored; accepted for interface compatibility.
    """
    axis0 = torch.linspace(-3, 4, n_test_per_dim)
    axis1 = torch.linspace(-2, 3, n_test_per_dim)
    grid = torch.stack(torch.meshgrid(axis0, axis1), dim=-1).to(device)
    with torch.no_grad():
        return model(grid.view(-1, 2)).squeeze(-1).softmax(-1).mean(0)
|
Blackbox-Coresets-VI-main
|
psvi/inference/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
r"""
Incremental variational coreset utilising the PSVI objective
"""
import time
import numpy as np
import torch
import torch.distributions as dist
import torch.nn as nn
from psvi.inference.utils import (
elbo,
forward_through_coreset,
predict_through_coreset,
sparsevi_psvi_elbo,
)
from psvi.models.neural_net import make_fcnet, VILinear
from tqdm import tqdm
def run_sparsevi_with_bb_elbo(
    n_layers=1,
    logistic_regression=True,
    n_hidden=40,
    log_every=10,
    lr0=1e-3,
    register_elbos=False,
    seed=0,
    **kwargs,
):
    r"""
    Incremental variational coreset construction, with greedy selection step and coreset points weight vector optimization using our generalized ELBO

    Args:
        n_layers: number of hidden layers for the BNN used when
            ``logistic_regression`` is False.
        logistic_regression: if True, fit a single ``VILinear`` layer
            (Bayesian logistic regression); otherwise a fully connected VI net.
        n_hidden: hidden width of the BNN.
        log_every: evaluate and log predictive metrics every this many steps.
        lr0: learning rate shared by the net and weight optimizers.
        register_elbos: if True, record ELBO traces in the returned results.
        seed: RNG seed applied to both numpy and torch.
        **kwargs: must provide ``num_epochs``, ``inner_it``, ``outer_it``,
            ``x``, ``y``, ``xt``, ``yt``, ``mc_samples``, ``data_minibatch``
            and ``scatterplot_coreset`` (plus plotting helpers when the
            latter is truthy).

    Returns:
        dict with predictive accuracies (``accs``), test negative
        log-likelihoods (``nlls``), coreset sizes (``csizes``), cumulative
        wall-clock times (``times``) and any registered ELBO values
        (``elbos``).
    """
    saved_args = locals()
    print("saved_args is", saved_args)
    np.random.seed(seed), torch.manual_seed(seed)
    elbos = []
    results = {}
    num_epochs, inner_it, outer_it = (
        kwargs["num_epochs"],
        kwargs["inner_it"],
        kwargs["outer_it"],
    )
    x, y, xt, yt, mc_samples, data_minibatch = (
        kwargs["x"],
        kwargs["y"],
        kwargs["xt"],
        kwargs["yt"],
        kwargs["mc_samples"],
        kwargs["data_minibatch"],
    )
    N, D = x.shape
    # Single VILinear layer for logistic regression, otherwise a VI BNN.
    net = (
        nn.Sequential(
            VILinear(D, 1, mc_samples=mc_samples),
        )
        if logistic_regression
        else make_fcnet(
            D,
            n_hidden,
            1,
            n_layers=n_layers,
            linear_class=VILinear,
            nonl_class=nn.ReLU,
            mc_samples=mc_samples,
        )
    )
    w = (
        torch.zeros(N).clone().detach().requires_grad_(True)
    )  # coreset weights initialised to 0
    nlls_sbbvi, accs_sbbvi, core_idcs_sbbvi = [], [], []
    optim_net0 = torch.optim.Adam(
        list(net.parameters()), lr0
    )  # optimizer for ELBO on coreset datapoints
    optim_w = torch.optim.Adam([w], lr0)  # optimizer for PSVI-ELBO
    core_idcs = []
    times = [0]
    t_start = time.time()
    # Grow the coreset for num_epochs iterations
    for it in tqdm(range(num_epochs)):
        # Evaluate coreset posterior
        if it % log_every == 0:
            with torch.no_grad():
                test_data_logits, weights = predict_through_coreset(net, xt, x, y, w)
                # Importance-weighted average of per-sample test probabilities.
                test_probs = torch.clamp(weights @ (test_data_logits.sigmoid()), max=1)
                test_acc = test_probs.gt(0.5).float().eq(yt).float().mean()
                test_nll = -dist.Bernoulli(probs=test_probs).log_prob(yt).mean()
                nlls_sbbvi.append(test_nll.item())
                accs_sbbvi.append(test_acc.item())
                print(f"predictive accuracy: {(100*test_acc.item()):.2f}%")
                core_idcs_sbbvi.append(len(core_idcs))
                times.append(times[-1] + time.time() - t_start)
        # Optionally visualize the final decision boundary and coreset.
        if kwargs["scatterplot_coreset"]:
            if it == num_epochs - 1:
                test_data_logits, weights = predict_through_coreset(
                    net, kwargs["xgrid"], x, y, w
                )
                test_probs = torch.clamp(weights @ (test_data_logits.sigmoid()), max=1)
                r = (
                    test_probs.reshape(
                        (
                            int(np.sqrt(kwargs["xgrid"].shape[0])),
                            int(np.sqrt(kwargs["xgrid"].shape[0])),
                        )
                    ),
                    xt,
                    kwargs["plot_data"],
                    kwargs["plot_preds"],
                    x[w > 0],
                    y[w > 0],
                )
                kwargs["plot_classification_with_coreset"](*r, 1, "sparse bbvi")
        x_core, y_core = x[core_idcs, :], y[core_idcs]
        sub_idcs, sum_scaling = (
            np.random.randint(x.shape[0], size=data_minibatch),
            x.shape[0] / data_minibatch,
        )  # sample minibatch when accessing full data and rescale corresponding log-likelihood
        # 1. Approximate current coreset posterior via minimizing the ELBO on the coreset support
        optim_net0.zero_grad()
        for in_it in range(inner_it):
            loss = elbo(net, x_core, y_core, w[core_idcs])
            if register_elbos and in_it % log_every == 0:
                with torch.no_grad():
                    elbos.append((1, -loss.item()))
            loss.backward()
            optim_net0.step()
        with torch.no_grad():
            # 2. Compute loglikelihoods for each sample using samples from the approximation to the coreset posterior
            ll_core, ll_data, weights = forward_through_coreset(
                net, x_core, x[sub_idcs, :], y_core, y[sub_idcs], w[core_idcs]
            )
            # Center the log-likelihoods by their importance-weighted means.
            cll_data, cll_core = ll_data - torch.einsum(
                "s, ns ->ns", weights, ll_data
            ), ll_core - torch.einsum("s, ms ->ms", weights, ll_core)
            # 3. Select point to attach to the coreset next via max correlation with residual error
            resid = sum_scaling * cll_data.sum(axis=0) - torch.einsum(
                "m, ms ->s", w[core_idcs], cll_core
            )
            corrs = (
                cll_data.matmul(resid)
                / torch.sqrt((cll_data**2).sum(axis=1))
                / cll_data.shape[1]
            )
            corecorrs = (
                torch.abs(cll_core.matmul(resid))
                / torch.sqrt((cll_core**2).sum(axis=1))
                / cll_core.shape[1]
                if len(core_idcs) > 0
                else None
            )
            # Only add a new point if it beats every existing coreset point.
            if corecorrs is None or corrs.max() > corecorrs.max():
                pt_idx = sub_idcs[torch.argmax(torch.max(corrs))]
                core_idcs.append(pt_idx) if pt_idx not in core_idcs else None
        # 4. Sample for updated weights and take projected gradient descent steps on the weights
        x_core, y_core = x[core_idcs, :], y[core_idcs]
        sub_idcs, sum_scaling = (
            np.random.randint(x.shape[0], size=data_minibatch),
            x.shape[0] / data_minibatch,
        )  # sample minibatch when accessing full data and rescale corresponding log-likelihood
        for out_it in range(outer_it):
            optim_w.zero_grad()
            loss_joint = sparsevi_psvi_elbo(
                net, x[sub_idcs, :], x_core, y[sub_idcs], y_core, w[core_idcs], N
            )
            if register_elbos and out_it % log_every == 0:
                with torch.no_grad():
                    elbos.append((0, -loss_joint.item()))
            loss_joint.backward()
            optim_w.step()
            with torch.no_grad():
                # Project weights back onto the nonnegative orthant.
                torch.clamp_(w, 0)
    # store results
    results["accs"] = accs_sbbvi
    results["nlls"] = nlls_sbbvi
    results["csizes"] = core_idcs_sbbvi
    results["times"] = times[1:]
    results["elbos"] = elbos
    return results
|
Blackbox-Coresets-VI-main
|
psvi/inference/sparsebbvi.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for making ``torch.nn.Module`` subclass instances stateless."""
import abc as _abc
import typing as _typing
import warnings as _warnings
import weakref as _weakref
from collections import OrderedDict as _OrderedDict
from contextlib import contextmanager as _contextmanager
import torch as _torch
from . import utils as _utils
# ==============================================================================
# Helper functions and attributes for MonkeyPatch modules.
# ==============================================================================
# Attribute names managed by torch.nn.Module's internal bookkeeping; attribute
# copying onto patched modules skips these so each patched module keeps its own.
_internal_attrs = {
    "_backend",
    "_parameters",
    "_buffers",
    "_backward_hooks",
    "_forward_hooks",
    "_forward_pre_hooks",
    "_state_dict_hooks",
    "_load_state_dict_pre_hooks",
    "_modules",
}
# Type of a module's buffer mapping: buffer name -> tensor (or None).
_BufferType = _typing.Dict[str, _typing.Optional[_torch.Tensor]]
@_contextmanager
def _modify_internally(fmodule):
fmodule._being_modified_internally = True
yield
fmodule._being_modified_internally = False
def _patched_parameters(
self, recurse: bool = True, time: _typing.Optional[int] = None
) -> _typing.Iterable[_torch.Tensor]:
r"""Returns an iterator over monkey patched module fast parameters.
Args:
recurse (bool): if True, then yields fast parameters of this module
and all submodules. Otherwise, this *still* yields parameters of
this module and all submodules, and raises a warning. This keyword
exists only to satisfy API compatibility with
``torch.nn.Module.parameters``.
time (int or None): if None, the most recent fast parameters are
provided. The int provided stands for the number of steps since the
module was created. *Note* that the step counter is incremented
every time parameters are updated, so this may not align with number
of training or evaluations steps.
Yields:
Parameter: module fast weights.
"""
if getattr(self, "_fast_params", None) is None:
raise Exception(
"Tried to get fast weights of a monkey patched module which does "
"not encapsulate fast weights."
)
if not recurse:
_warnings.warn(
"Calling parameters with recurse=False on a monkey patched module "
"still returns all the fast weights of of nested patched modules."
)
time = -1 if time is None else time
if not self.track_higher_grads and time not in (-1, 0):
raise ValueError(
"The patched model is not tracking higher gradients. Only the "
"latest parameters are available."
)
return iter(self._fast_params[time])
class _MonkeyPatchBase(_abc.ABC, _torch.nn.Module):
    """Shared behaviour for monkey-patched ("functional") modules.

    Stores the history of fast (differentiable) weights and exposes it through
    the ``fast_params`` / ``init_fast_params`` properties.
    """
    @_abc.abstractmethod
    def __init__(self) -> None:
        # Maps expanded parameter positions to unique parameter indices
        # (shared parameters appear multiple times in the expansion).
        self._param_mapping: _typing.List[int] = []
        # Guard flag toggled by _modify_internally while patched code mutates
        # parameters deliberately.
        self._being_modified_internally: bool = True
        self._track_higher_grads: bool = True
    def forward(self):
        raise NotImplementedError(
            "The monkey-patching logic has failed to override self.forward "
            "on the new module, or you tried calling forward on a patched "
            "version of a module which doesn't have forward (e.g. ModuleList)."
        )
    def _expand_params(
        self, params: _typing.List[_torch.Tensor]
    ) -> _typing.List[_torch.Tensor]:
        # Re-duplicate shared parameters according to the recorded mapping.
        expanded = []
        for index in self._param_mapping:
            expanded.append(params[index])
        return expanded
    @property
    def init_fast_params(self):
        # Earliest fast-weight snapshot; only available while tracking history.
        if not self.track_higher_grads:
            raise Exception(
                "Cannot get initial parameters when not tracking higher " "gradients."
            )
        return self._fast_params[0]
    @property
    def fast_params(self):
        # Most recent fast weights, or None before the first update.
        return None if self._fast_params is None else self._fast_params[-1]
    @fast_params.setter
    def fast_params(self, value):
        value = list(value)
        if self._fast_params is None:
            self._fast_params = []
        if self.track_higher_grads:
            # Append so the whole unroll history stays differentiable.
            self._fast_params.append(value)
        else:
            # Not tracking: overwrite the single slot to save memory.
            self._fast_params[0] = value
    @property
    def track_higher_grads(self):
        return self._track_higher_grads
    @track_higher_grads.setter
    def track_higher_grads(self, value):
        if not isinstance(value, bool):
            raise ValueError("Expected boolean argument. Got: {}.".format(type(value)))
        self._track_higher_grads = value
def buffer_sync(
    module: _torch.nn.Module,
    fmodule: _MonkeyPatchBase,
    device: _typing.Optional[_torch.device] = None,
) -> None:
    r"""One off sync (copy) of buffers in ``fmodule`` with those from ``module``.

    Non-tensor buffer values are copied by reference; tensors are cloned and
    detached (and moved to ``device`` when given). Recurses into submodules
    and raises ``KeyError`` if ``fmodule`` is missing an expected child.
    """
    for key, value in module._buffers.items():
        if not _torch.is_tensor(value):
            copied = value
        else:
            copied = value.clone().detach()
            if device is not None:
                copied = copied.to(device)
        fmodule._buffers[key] = copied
    for name, child in module._modules.items():
        if name not in fmodule._modules:
            raise KeyError(
                "Did not find expected submodule "
                "{} of monkey-patched module {}.".format(name, fmodule)
            )
        buffer_sync(child, fmodule._modules[name], device)
# ==============================================================================
# Helper class to use instead of actual torch.nn.Parameters when patching.
# ==============================================================================
class _ParameterPlaceholder:
def __init__(self, name: str) -> None:
self._param_name = name
def __repr__(self) -> str:
return 'Parameter placeholder ("{}")'.format(self._param_name)
_ParameterPlaceholder.__name__ = "ParameterPlaceholder"
_ParameterPlaceholder.__qualname__ = "ParameterPlaceholder"
# ==============================================================================
# Helper function for recursively patching submodules.
# ==============================================================================
def _make_functional(
    module: _torch.nn.Module,
    params_box: _typing.Sequence[_typing.Optional[_typing.List[_torch.Tensor]]],
    params_offset: int,
    root_patched: _typing.Optional[_MonkeyPatchBase] = None,
) -> _typing.Tuple[int, _MonkeyPatchBase, _typing.Type[_MonkeyPatchBase]]:
    r"""Recursively create a stateless ``MonkeyPatched`` twin of ``module``.

    Args:
        module: the original (stateful) module to patch.
        params_box: single-element list that holds the flat list of fast
            weights during a forward pass; shared by all patched submodules.
        params_offset: index into the flat parameter list where this module's
            own parameters start.
        root_patched: the root patched module, or None when this call creates
            the root.

    Returns:
        Tuple of (parameter offset after this subtree, the patched module
        instance, its dynamically created class).
    """
    if isinstance(module, _MonkeyPatchBase):
        raise ValueError(
            "Monkey-patching monkey-patched modules is untested uncharted "
            "territory, so we're going to assume it's done in error. If you "
            "are doing this intentionally and need this to be supported, "
            "contact the developers of this library."
        )
    # Names of this module's own (non-None) parameters, in registration order.
    param_names = list(
        name
        for name in module._parameters.keys()
        if module._parameters[name] is not None
    )
    _ModuleType: _typing.Type[_torch.nn.Module] = module.__class__
    # type checking of next line disabled as mypy is iffy with dynamic types
    class MonkeyPatched(_ModuleType, _MonkeyPatchBase):  # type: ignore
        _wrapped_name = type(module).__name__
        def __init__(self, original_params, root) -> None:
            _torch.nn.Module.__init__(self)
            _MonkeyPatchBase.__init__(self)
            # Weak reference avoids a root <-> child reference cycle.
            self._root_ref = _weakref.ref(root) if root else None
            self._fast_params = None
            self._param_names = param_names
            self._original_params = original_params
            # for pretty printing
            self._parameters = _OrderedDict(
                (name, _ParameterPlaceholder(name)) for name in self._param_names
            )
            self._modules: _typing.Dict[str, _MonkeyPatchBase] = _OrderedDict()
        @property
        def direct_submodule_call(self):
            # True when this submodule is called before the root filled the box.
            return params_box[0] is None
        @property
        def is_root(self):
            return self._root_ref is None
        @property
        def root(self):
            if self.is_root:
                return self
            else:
                return self._root_ref()
        def __setattr__(self, name, value):
            def remove_from(*dicts):
                for d in dicts:
                    if name in d:
                        del d[name]
            params = self.__dict__.get("_parameters")
            if params is not None and name in params:
                if not isinstance(value, _torch.Tensor):
                    raise TypeError(
                        "Require Tensor as fast weights. "
                        "Got {}".format(_torch.typename(value))
                    )
                if not self._being_modified_internally:
                    # Additional behaviour for when fast weights are being
                    # directly modified goes here:
                    old_value = self._parameters[name]
                    fast_params = self.root.fast_params[:]
                    if not fast_params:
                        raise Exception(
                            "Cannot assign parameters to patched module which "
                            "does not have implicit fast parameters."
                        )
                    replacement_index = _utils._find_param_in_list(
                        old_value, fast_params
                    )
                    fast_params[replacement_index] = value
                    self.update_params(fast_params)
                # Change parameters in place, usually during boxed_forward pass
                self._parameters[name] = value
            else:
                modules = self.__dict__.get("_modules")
                if isinstance(value, _torch.nn.Module):
                    if modules is None:
                        raise AttributeError(
                            "cannot assign module before Module.__init__() " "call"
                        )
                    remove_from(self.__dict__, self._parameters, self._buffers)
                    modules[name] = value
                elif modules is not None and name in modules:
                    if value is not None:
                        raise TypeError(
                            (
                                "cannot assign '{}' "
                                "as child module '{}'"
                                "(torch.nn.Module or None expected)"
                            ).format(_torch.typename(value), name)
                        )
                    modules[name] = value
                else:
                    buffers = self.__dict__.get("_buffers")
                    if buffers is not None and name in buffers:
                        if value is not None and not isinstance(value, _torch.Tensor):
                            raise TypeError(
                                "cannot assign '{}' as buffer '{}' "
                                "(torch.Tensor or None expected)".format(
                                    _torch.typename(value), name
                                )
                            )
                        buffers[name] = value
                    else:
                        object.__setattr__(self, name, value)
    MonkeyPatched.__name__ = "InnerFunctional" + type(module).__name__
    MonkeyPatched.__qualname__ = MonkeyPatched.__name__
    fmodule = MonkeyPatched(module.parameters(), root=root_patched)
    # If a root module hasn't been defined yet, this fmodule is the root
    if not root_patched:
        root_patched = fmodule
    # use 1 as dummy list item since we are only counting
    num_params = len([1 for p in module._parameters.values() if p is not None])
    # Copy over all attributes
    for name, attr in module.__dict__.items():
        if name in _internal_attrs:
            continue
        setattr(fmodule, name, attr)
    # Deal with "None"-style params
    with _modify_internally(fmodule):
        for name, attr in module.__dict__["_parameters"].items():
            if isinstance(attr, _torch.nn.Parameter):
                continue
            else:
                setattr(fmodule, name, attr)
    child_params_offset = params_offset + num_params
    # Recursively patch children, threading the parameter offset through.
    for name, child in module._modules.items():
        child_params_offset, fchild, _ = _make_functional(
            child, params_box, child_params_offset, root_patched
        )
        fmodule._modules[name] = fchild
        setattr(fmodule, name, fchild)
    true_forward = type(module).forward
    def patched_forward(self, *args, params=None, **kwargs):
        if self.direct_submodule_call:
            # If submodule was called directly, run intialisation that happens
            # at top level call. If *full set of params* is provided here, it
            # will use those. If not, it will fall back on fast weights.
            # In the future, we should be able to support passing only the
            # submodule (+ children) weights here, but that's not simple.
            self.root._refill_params_box(params)
        with _modify_internally(self):
            # Bind this module's slice of the boxed fast weights by name.
            for name, param in zip(
                self._param_names,
                params_box[0][params_offset : params_offset + num_params],
            ):
                setattr(self, name, param)
            # This snippet deals with torch.nn.{RNN,GRU,LSTM}
            if hasattr(self, "_flat_weights_names"):
                self._flat_weights = [
                    self._parameters[wn] for wn in self._flat_weights_names
                ]
        # Call true_forward after some checks
        with _warnings.catch_warnings():
            # If running RNNs on GPU, surpress the warnings due to flattening
            # not happening here. Maybe we should raise a warning of our own?
            is_RNN = isinstance(module, _torch.nn.RNNBase)
            if is_RNN and _torch.cuda.is_available():
                _warnings.simplefilter("ignore", category=UserWarning)
            return true_forward(self, *args, **kwargs)
    setattr(MonkeyPatched, "forward", patched_forward)
    def flatten_parameters(self):
        return  # no-op
    # This (hopefully) avoids trouble on GPU with torch.nn.{RNN,GRU,LSTM}
    if hasattr(module, "flatten_parameters"):
        setattr(MonkeyPatched, "flatten_parameters", flatten_parameters)
    return child_params_offset, fmodule, type(fmodule)
def _update_patched_params(
    fmodule: _MonkeyPatchBase,
    params_box: _typing.Sequence[_typing.List[_torch.Tensor]],
    params_offset: int,
) -> int:
    r"""Recursively rebind the fast weights in ``params_box`` onto ``fmodule``.

    Children consume the flat parameter list after this module's own slice;
    returns the offset following this subtree's parameters.
    """
    own_count = sum(1 for p in fmodule._parameters.values() if p is not None)
    offset = params_offset + own_count
    for child in fmodule._modules.values():
        offset = _update_patched_params(child, params_box, offset)
    # Flag the assignments as internal so __setattr__ skips fast-weight logic.
    with _modify_internally(fmodule):
        own_slice = params_box[0][params_offset : params_offset + own_count]
        for name, param in zip(fmodule._param_names, own_slice):
            setattr(fmodule, name, param)
    return offset
# ==============================================================================
# The main function which does the monkey patching.
# ==============================================================================
# Optional callback run by make_functional after patching; receives the patched
# module and the original module (e.g. to copy initial weights/buffers across).
_EncapsulatorType = _typing.Optional[
    _typing.Callable[[_MonkeyPatchBase, _torch.nn.Module], None]
]
def make_functional(
    module: _torch.nn.Module, encapsulator: _EncapsulatorType = None
) -> _MonkeyPatchBase:
    r"""Returns a stateless version of an ``nn.Module`` instance.

    Patches ``module`` recursively, installs the functional ``forward``/
    ``parameters``/``update_params`` methods on the generated class, and
    finally runs ``encapsulator`` (if given) on the patched/original pair.
    """
    # Shared single-slot box that carries the flat fast weights per forward.
    params_box = [None]
    _, fmodule, MonkeyPatched = _make_functional(module, params_box, 0)
    top_name = "Functional" + MonkeyPatched._wrapped_name
    MonkeyPatched.__name__ = MonkeyPatched.__qualname__ = top_name
    # Keep the recursion-time forward reachable under a different name.
    MonkeyPatched.boxed_forward = MonkeyPatched.forward
    param_mapping = _utils._get_param_mapping(module, [], [])
    setattr(fmodule, "_param_mapping", param_mapping)
    def _refill_params_box(self, params):
        if params is not None:
            self.fast_params = params  # update view on latest fast params
        elif self.fast_params is None:
            raise ValueError(
                "params keyword must be provided if patched module not "
                "tracking its own fast parameters"
            )
        # Copy fast parameters into params_box for use in boxed_forward
        params_box[0] = self._expand_params(self.fast_params)
    def _patched_forward(self, *args, params=None, **kwargs):
        self._refill_params_box(params)
        output = self.boxed_forward(*args, **kwargs)
        # Clean up
        params_box[0] = None
        return output
    def _update_params(self, params):
        self.fast_params = params
        params = self._expand_params(params)
        _update_patched_params(self, [params], 0)
    setattr(MonkeyPatched, "forward", _patched_forward)
    setattr(MonkeyPatched, "parameters", _patched_parameters)
    setattr(MonkeyPatched, "update_params", _update_params)
    setattr(MonkeyPatched, "_refill_params_box", _refill_params_box)
    if encapsulator is not None:
        encapsulator(fmodule, module)
    return fmodule
# ==============================================================================
# Convenience functions and decorators for hiding away a lot of the complexity
# of creating patched modules, taking their parameters, and linking patched
# modules to a differentiable optimizer.
# ==============================================================================
def monkeypatch(
    module: _torch.nn.Module,
    device: _typing.Optional[_torch.device] = None,
    copy_initial_weights: bool = True,
    track_higher_grads: bool = True,
) -> _MonkeyPatchBase:
    r"""Create a monkey-patched stateless version of a module.

    Produces a monkey-patched version of ``module`` whose forward accepts a
    kwarg-only ``params`` list of fast weights. Stateful submodules (e.g.
    batch norm) have their state copied; later updates to the patched copy
    diverge without touching the original module.

    Args:
        module: a ``torch.nn.Module`` subclass instance.
        device (optional): a device to cast the fast weights and state to.
        copy_initial_weights: if True, the initial fast weights are detached
            copies of the module's weights and stay off the gradient tape; if
            False, the actual module weights seed the patched module (useful
            for MAML-style training).
        track_higher_grads: if True, retain the unrolled graph so the fast
            weights permit backpropagation through the optimization process;
            set False for a cheaper "test mode" (e.g. k-shot evaluation).

    Returns:
        ``fmodule``: a "stateless" version of the original module, suitable
        for use with the differentiable optimizers in ``higher.optim``.
    """
    def encapsulator(fmodule: _MonkeyPatchBase, module: _torch.nn.Module) -> None:
        # Seed the patched module with fast weights and copied buffers.
        if copy_initial_weights:
            params = _utils.get_func_params(module, device=device)
        elif device is None:
            params = [p.clone() for p in module.parameters()]
        else:
            params = [p.clone().to(device) for p in module.parameters()]
        buffer_sync(module, fmodule, device)
        fmodule.update_params(params)
    patched = make_functional(module, encapsulator=encapsulator)
    patched.track_higher_grads = track_higher_grads
    return patched
|
Blackbox-Coresets-VI-main
|
psvi/robust_higher/patch.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing as _typing
from contextlib import contextmanager as _contextmanager
import torch as _torch
from . import optim
from .patch import monkeypatch
@_contextmanager
def innerloop_ctx(
    model: _torch.nn.Module,
    opt: _torch.optim.Optimizer,
    device: _typing.Optional[_torch.device] = None,
    copy_initial_weights: bool = True,
    override: optim._OverrideType = None,
    track_higher_grads: bool = True,
):
    r"""A context manager for writing differentiable inner loops.

    Monkey-patches ``model`` into a stateless module and wraps ``opt`` in a
    differentiable optimizer over the same parameters, yielding the pair for
    use inside the ``with`` block.

    Args:
        model: a ``torch.nn.Module`` subclass instance.
        opt: an existing ``torch.optim.Optimizer`` of a supported type (built
            into ``torch.optim`` or registered with ``higher.register_optim``),
            assumed to track the parameters (or a subset) of a single
            ``torch.nn.Module`` instance, with support for parameter groups.
        device (optional): a device to cast the fast weights and state to; if
            unspecified, the devices of the corresponding ``model`` weights
            are used.
        copy_initial_weights: if True, the patched module starts from copies
            of the weights, keeping them off the gradient tape; if False, the
            actual module weights are the initial fast weights (useful for
            MAML-style training).
        override (optional): a dictionary mapping optimizer setting names to
            either singleton lists of override values or lists with one value
            per parameter group; a singleton is applied to all groups. Values
            may be tensors requiring grad, enabling differentiable optimizer
            settings.
        track_higher_grads: if True, retain the unrolled graph so the fast
            weights permit backpropagation through the optimization process;
            set False for a cheaper "test mode" (e.g. k-shot evaluation).

    Yields:
        A ``(fmodule, diffopt)`` tuple: the "stateless" version of ``model``
        (whose ``forward`` takes the additional kwarg-only ``params`` list of
        fast weights) and the initialized ``DifferentiableOptimizer`` of the
        right subtype.
    """
    # Build the stateless twin first, then bind the differentiable optimizer
    # to the original parameters it will shadow.
    fmodel = monkeypatch(
        model,
        device,
        copy_initial_weights=copy_initial_weights,
        track_higher_grads=track_higher_grads,
    )
    diffopt = optim.get_diff_optim(
        opt,
        model.parameters(),
        fmodel=fmodel,
        device=device,
        override=override,
        track_higher_grads=track_higher_grads,
    )
    yield fmodel, diffopt
__all__: list = ["innerloop_ctx"]
|
Blackbox-Coresets-VI-main
|
psvi/robust_higher/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for components of ``higher``\ ."""
import typing as _typing
import torch as _torch
_T = _typing.TypeVar("_T")
_U = _typing.TypeVar("_U")
def _copy_tensor(
t: _torch.Tensor, safe_copy: bool, device: _typing.Optional[_torch.device] = None
) -> _torch.Tensor:
if safe_copy:
t = t.clone().detach().requires_grad_(t.requires_grad)
else:
t = t.detach().requires_grad_(t.requires_grad)
t = t if device is None else t.to(device)
return t
def _recursive_copy_and_cast(
    target: _typing.Union[list, tuple, dict, set, _torch.Tensor],
    device: _typing.Optional[_torch.device],
) -> _torch.Tensor:
    r"""Deep-copy ``target``, safe-copying every tensor leaf and casting it to
    ``device``; non-tensor leaves pass through unchanged."""
    def map_fn(leaf):
        if _torch.is_tensor(leaf):
            return _copy_tensor(leaf, True, device=device)
        return leaf
    return _recursive_map(target, map_fn)
def _recursive_map(
target: _typing.Union[list, tuple, dict, set, _T],
map_fn: _typing.Callable[[_T], _U],
) -> _typing.Union[list, tuple, dict, set, _U]:
if isinstance(target, list):
return type(target)([_recursive_map(x, map_fn) for x in target])
elif isinstance(target, tuple):
return type(target)([_recursive_map(x, map_fn) for x in target])
elif isinstance(target, dict):
return type(target)({k: _recursive_map(v, map_fn) for k, v in target.items()})
elif isinstance(target, set):
return type(target)({_recursive_map(x, map_fn) for x in target})
else:
return map_fn(target)
def _is_container(target: _typing.Any) -> bool:
flag = (
isinstance(target, list)
or isinstance(target, tuple)
or isinstance(target, dict)
or isinstance(target, set)
)
return flag
def _find_param_in_list(
param: _torch.Tensor, l: _typing.Iterable[_torch.Tensor]
) -> _typing.Optional[int]:
for i, p in enumerate(l):
if p is param:
return i
else:
return None
def _get_param_mapping(
    module: _torch.nn.Module,
    seen: _typing.List[_torch.Tensor],
    mapping: _typing.List[int],
) -> _typing.List[int]:
    r"""Build an index mapping from a module's parameter positions to unique
    parameter slots, deduplicating shared parameters by identity.

    ``seen`` and ``mapping`` are mutated in place and threaded through the
    recursion over submodules.
    """
    for param in module._parameters.values():
        if param is None:
            continue
        position = _find_param_in_list(param, seen)
        if position is None:
            # First sighting: assign the next fresh slot.
            mapping.append(len(seen))
            seen.append(param)
        else:
            # Shared parameter: point back at its existing slot.
            mapping.append(position)
    for child in module._modules.values():
        _get_param_mapping(child, seen, mapping)
    return mapping
def flatten(x: _typing.Any) -> _typing.List[_typing.Any]:
    r"""Returns a flattened list of objects from a nested structure."""
    if isinstance(x, dict):
        children = x.values()
    elif isinstance(x, (list, set, tuple)):
        children = x
    else:
        # Leaf: wrap in a singleton list.
        return [x]
    out: _typing.List[_typing.Any] = []
    for child in children:
        out.extend(flatten(child))
    return out
def get_func_params(
    module: _torch.nn.Module,
    device: _typing.Optional[_torch.device] = None,
    safe_copy: bool = True,
) -> _typing.List[_torch.Tensor]:
    r"""Returns copies of the module's parameters, in ``module.parameters()``
    order, produced by ``_copy_tensor`` (optionally cast to ``device``)."""
    copies = []
    for param in module.parameters():
        copies.append(_copy_tensor(param, safe_copy, device))
    return copies
|
Blackbox-Coresets-VI-main
|
psvi/robust_higher/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Differentiable optimizer wrappers around ``torch.optim`` instances."""
import abc as _abc
import collections as _collections
import copy as _copy
import math as _math
import typing as _typing
import warnings as _warnings
import torch as _torch
from . import patch as _patch, utils as _utils
# One list of gradient tensors per parameter group, in group order.
_GroupedGradsType = _typing.List[_typing.List[_torch.Tensor]]
# Per-group optimizer state: one dict per parameter group, keyed by the
# parameter's index *within* the group (not by the parameter tensor).
_StateType = _typing.List[_typing.DefaultDict[int, _typing.Any]]
# A hook suitable for Tensor.register_hook: maps a gradient to a gradient.
_GradClosureType = _typing.Callable[[_torch.Tensor], _torch.Tensor]
# Maps a hyperparameter name to a singleton list (broadcast to all groups)
# or to one override value per parameter group.
_OverrideType = _typing.Dict[str, _typing.List[_typing.Any]]
# Callback applied to the flat list of all parameter gradients (e.g. for
# clipping); must return a list of the same length and order.
_GradCallbackType = _typing.Callable[
    [_typing.List[_torch.Tensor]], _typing.List[_torch.Tensor]
]
def _get_mask_closure(mask: _torch.Tensor) -> _GradClosureType:
    """Build a gradient hook that zeroes gradient entries where ``mask``
    is True, and re-installs itself on the masked result so the masking
    persists through further differentiation."""
    def closure(grad: _torch.Tensor) -> _torch.Tensor:
        masked = _torch.where(mask, _torch.zeros_like(grad), grad)
        if masked.requires_grad:
            # Propagate the mask to higher-order gradients as well.
            masked.register_hook(_get_mask_closure(mask))
        return masked
    return closure
def _maybe_mask(tensor: _torch.Tensor, mask: _torch.Tensor) -> None:
    """Attach a masking gradient hook to ``tensor`` when it participates in
    autograd; a no-op for tensors that do not require grad."""
    if not tensor.requires_grad:
        return
    tensor.register_hook(_get_mask_closure(mask))
class DifferentiableOptimizer(_abc.ABC):
    r"""Base class for differentiable wrappers around ``torch.optim`` optimizers.

    Subclasses implement ``_update``, which must compute new parameter values
    out-of-place so that each optimization step stays on the autograd tape.
    """
    def __init__(
        self,
        other: _torch.optim.Optimizer,
        reference_params: _typing.Iterable[_torch.Tensor],
        fmodel: _typing.Optional[_patch._MonkeyPatchBase] = None,
        device: _typing.Optional[_torch.device] = None,
        override: _typing.Optional[_OverrideType] = None,
        grad_callback: _typing.Optional[_GradCallbackType] = None,
        track_higher_grads: bool = True,
        **kwargs,
    ) -> None:
        r"""Initialize the optimizer with the state of an existing optimizer.
        Args:
            other: an existing optimizer instance.
            reference_params: an iterable over the parameters of the original
                model.
            fmodel (optional): a patched stateless module with a view on
                weights.
            device (optional): the device to cast state tensors to.
            override (optional): a dictionary mapping optimizer settings (i.e.
                those which would be passed to the optimizer constructor or
                provided within parameter groups) to either singleton lists of
                override values, or to a list of override values of length equal
                to the number of parameter groups. If a single override is
                provided for a keyword, it is used for all parameter groups. If
                a list is provided, the ``i``\ th element of the list overrides the
                corresponding setting in the ``i``\ th parameter group. This permits
                the passing of tensors requiring gradient to differentiable
                optimizers for use as optimizer settings.
            grad_callback: (optional) a single argument function which will be
                applied to a list of gradients of parameters, which respects the
                order specified by ``reference_params``. This can be used to
                apply a function, such as gradient clipping, to all (or a
                subset) of these gradients every time the step function is
                called. If this keyword argument is provided when calling the
                step method, its value will override the default specified here.
            track_higher_grads: if True, during unrolled optimization the graph
                be retained, and the fast weights will bear grad funcs, so as to
                permit backpropagation through the optimization process. Setting
                this to False allows the differentiable optimizer to be used in
                "test mode", without potentially tracking higher order
                gradients. This can be useful when running the training loop at
                test time, e.g. in k-shot learning experiments, without
                incurring a significant memory overhead.
        """
        reference_params = list(reference_params)
        # Copy param groups and set up structures for copy state
        self.param_groups = _copy.deepcopy(other.param_groups)
        self._group_to_param_list: _typing.List[_typing.List[int]] = []
        # Unlike torch.optim, state is keyed by (group index, param index)
        # rather than by the parameter tensor itself, since fast weights are
        # replaced by fresh tensors on every step.
        self.state: _StateType = [
            _collections.defaultdict(dict) for _ in range(len(self.param_groups))
        ]
        # Deal with override
        if override is not None:
            self._apply_override(override)
        self._grad_callback = grad_callback
        # Copy and cast state
        zipped = zip(self.param_groups, other.param_groups)
        for group_idx, (group, orig_group) in enumerate(zipped):
            local_list = []
            for p_idx, p in enumerate(orig_group["params"]):
                if p in other.state:
                    self.state[group_idx][p_idx] = {
                        k: _utils._recursive_copy_and_cast(v, device)
                        for k, v in other.state[p].items()
                    }
                # Record where this parameter sits in reference_params so
                # step() can match fast weights and gradients to groups.
                index = _utils._find_param_in_list(p, reference_params)
                if index is None:
                    raise ValueError(
                        "Could not find parameter {} in reference parameters.".format(
                            str(p)
                        )
                    )
                local_list.append(index)
            # Placeholders only: the live fast weights are slotted in at the
            # start of every step() call.
            group["params"] = [None] * len(group["params"])
            self._group_to_param_list.append(local_list)
        self._fmodel = fmodel
        self._track_higher_grads = track_higher_grads
    def _apply_override(self, override: _OverrideType) -> None:
        # Writes per-group (or broadcast singleton) hyperparameter overrides
        # into self.param_groups.
        for k, v in override.items():
            # Sanity check
            if (len(v) != 1) and (len(v) != len(self.param_groups)):
                raise ValueError(
                    "Mismatch between the number of override tensors for "
                    "optimizer parameter {} and the number of "
                    "parameter groups.".format(k)
                )
            for group_idx, group in enumerate(self.param_groups):
                group[k] = v[0] if len(v) == 1 else v[group_idx]
    def step(
        self,
        loss: _torch.Tensor,
        params: _typing.Optional[_typing.Iterable[_torch.Tensor]] = None,
        override: _typing.Optional[_OverrideType] = None,
        grad_callback: _typing.Optional[_GradCallbackType] = None,
        **kwargs,
    ) -> _typing.Iterable[_torch.Tensor]:
        r"""Perform a model update.
        This would be used by replacing the normal sequence::
            opt.zero_grad()
            loss.backward()
            opt.step()
        with::
            diffopt.step(loss)
        Args:
            loss: the loss tensor.
            params (optional): the parameters with regard to which we measure
                the loss. These must be provided if the differentiable optimizer
                did not receive a patched model with a view over its own fast
                weights at initialisation. If there is such a model, and params
                are provided, they will overwrite the params of the encapsulated
                model.
            override (optional): a dictionary mapping optimizer settings (i.e.
                those which would be passed to the optimizer constructor or
                provided within parameter groups) to either singleton lists of
                override values, or to a list of override values of length equal
                to the number of parameter groups. If a single override is
                provided for a keyword, it is used for all parameter groups. If
                a list is provided, the ``i``\ th element of the list overrides
                the corresponding setting in the ``i``\ th parameter group. This
                permits the passing of tensors requiring gradient to
                differentiable optimizers for use as optimizer settings. Setting
                override here has highest precedence, i.e. it will override any
                tensors provided as override during the creation of the
                differentiable optimizer, where there is name clash.
            grad_callback: (optional) a single argument function which will be
                applied to a list of gradients of parameters, which respects the
                order specified by ``reference_params``. This can be used to
                apply a function, such as gradient clipping, to all (or a
                subset) of these gradients every time the step function is
                called. This callback overrides the default provided when
                constructing the differentiable optimizer.
        Returns:
            The updated parameters, which will individually have ``grad_fn``\ s
            of their own. If the optimizer has an encapsulated patched model,
            its view over its own fast weights will be updated with these
            params.
        """
        # Deal with override
        if override is not None:
            self._apply_override(override)
        if self._fmodel is None or self._fmodel.fast_params is None:
            if params is None:
                raise ValueError(
                    "params kwarg must be passed to step if the differentiable "
                    "optimizer doesn't have a view on a patched model with "
                    "params."
                )
        else:
            params = self._fmodel.fast_params if params is None else params
        params = list(params)
        # This allows us to gracefully deal with cases where params are frozen.
        # A frozen param is swapped for a dummy leaf so autograd.grad accepts
        # the call; its gradient comes back as None (allow_unused below) and
        # subclasses skip None grads.
        grad_targets = [
            p if p.requires_grad else _torch.tensor([], requires_grad=True)
            for p in params
        ]
        all_grads = _torch.autograd.grad(
            loss,
            grad_targets,
            create_graph=self._track_higher_grads,
            allow_unused=True, # boo
        )
        if grad_callback is not None:
            all_grads = grad_callback(all_grads)
        elif self._grad_callback is not None:
            all_grads = self._grad_callback(all_grads)
        # Regroup the flat fast weights/grads to match the parameter groups.
        grouped_grads = []
        for group, mapping in zip(self.param_groups, self._group_to_param_list):
            grads = []
            for i, index in enumerate(mapping):
                group["params"][i] = params[index]
                grads.append(all_grads[index])
            grouped_grads.append(grads)
        # Subclass hook: writes the updated params into group["params"].
        self._update(grouped_grads)
        new_params = params[:]
        for group, mapping in zip(self.param_groups, self._group_to_param_list):
            for p, index in zip(group["params"], mapping):
                if self._track_higher_grads:
                    new_params[index] = p
                else:
                    # "Test mode": truncate the graph so unrolled steps do not
                    # accumulate memory.
                    new_params[index] = p.detach().requires_grad_()
        if self._fmodel is not None:
            self._fmodel.update_params(new_params)
        return new_params
    @_abc.abstractmethod
    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        # Compute new parameter values out-of-place for each group, reading
        # gradients from grouped_grads and writing into group["params"].
        pass
class DifferentiableSGD(DifferentiableOptimizer):
    r"""A differentiable version of the SGD optimizer.
    This optimizer creates a gradient tape as it updates parameters."""
    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        zipped = zip(self.param_groups, grouped_grads)
        for group_idx, (group, grads) in enumerate(zipped):
            weight_decay = group["weight_decay"]
            momentum = group["momentum"]
            dampening = group["dampening"]
            nesterov = group["nesterov"]
            for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
                if g is None:
                    # Parameter unused in the loss (or frozen); leave as-is.
                    continue
                if weight_decay != 0:
                    g = _add(g, weight_decay, p)
                if momentum != 0:
                    param_state = self.state[group_idx][p_idx]
                    if "momentum_buffer" not in param_state:
                        # Unlike torch.optim.SGD, the first buffer is the grad
                        # itself (no clone/detach) so it stays differentiable.
                        buf = param_state["momentum_buffer"] = g
                    else:
                        buf = param_state["momentum_buffer"]
                        # Out-of-place buffer update keeps the tape intact.
                        buf = _add(buf.mul(momentum), 1 - dampening, g)
                        param_state["momentum_buffer"] = buf
                    if nesterov:
                        g = _add(g, momentum, buf)
                    else:
                        g = buf
                group["params"][p_idx] = _add(p, -group["lr"], g)
class DifferentiableAdam(DifferentiableOptimizer):
    r"""A differentiable version of the Adam optimizer.
    This optimizer creates a gradient tape as it updates parameters."""
    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        zipped = zip(self.param_groups, grouped_grads)
        for group_idx, (group, grads) in enumerate(zipped):
            amsgrad = group["amsgrad"]
            beta1, beta2 = group["betas"]
            weight_decay = group["weight_decay"]
            for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
                if g is None:
                    # Parameter unused in the loss (or frozen); leave as-is.
                    continue
                state = self.state[group_idx][p_idx]
                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    # Exponential moving average of gradient values
                    state["exp_avg"] = _torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = _torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. mov. avg. of sq. grad. vals
                        state["max_exp_avg_sq"] = _torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                if amsgrad:
                    max_exp_avg_sq = state["max_exp_avg_sq"]
                state["step"] += 1
                bias_correction1 = 1 - beta1 ** state["step"]
                bias_correction2 = 1 - beta2 ** state["step"]
                if weight_decay != 0:
                    g = g + (weight_decay * p)
                # Decay the first and second moment running average coefficient
                # (out-of-place so the update stays on the autograd tape).
                state["exp_avg"] = exp_avg = (exp_avg * beta1) + (1 - beta1) * g
                state["exp_avg_sq"] = exp_avg_sq = (exp_avg_sq * beta2) + (
                    1 - beta2
                ) * g * g
                # Deal with stability issues
                # Zero entries get their gradient masked (sqrt is taken below,
                # which is not differentiable at 0) and a small epsilon added.
                mask = exp_avg_sq == 0.0
                _maybe_mask(exp_avg_sq, mask)
                exp_avg_sq = exp_avg_sq + 1e-8
                if amsgrad:
                    # Maintains the max of all 2nd moment running avg. till now
                    state["max_exp_avg_sq"] = max_exp_avg_sq = _torch.max(
                        max_exp_avg_sq, exp_avg_sq
                    )
                    # Use the max. for normalizing running avg. of gradient
                    denom = _add(
                        max_exp_avg_sq.sqrt() / _math.sqrt(bias_correction2),
                        group["eps"],
                    )
                else:
                    denom = _add(
                        exp_avg_sq.sqrt() / _math.sqrt(bias_correction2), group["eps"]
                    )
                step_size = group["lr"] / bias_correction1
                group["params"][p_idx] = _addcdiv(p, -step_size, exp_avg, denom)
class DifferentiableAdamW(DifferentiableOptimizer):
    r"""A differentiable version of the AdamW optimizer.
    This optimizer creates a gradient tape as it updates parameters."""
    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        zipped = zip(self.param_groups, grouped_grads)
        for group_idx, (group, grads) in enumerate(zipped):
            amsgrad = group["amsgrad"]
            beta1, beta2 = group["betas"]
            for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
                if g is None:
                    # Parameter unused in the loss (or frozen); leave as-is.
                    continue
                # Perform stepweight decay
                # (decoupled weight decay: shrink p directly, not via g).
                p = p * (1 - group["lr"] * group["weight_decay"])
                if g.is_sparse:
                    raise RuntimeError("AdamW does not support sparse gradients")
                state = self.state[group_idx][p_idx]
                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    # Exponential moving average of gradient values
                    state["exp_avg"] = _torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = _torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. mov. avg. of sq. grad. vals
                        state["max_exp_avg_sq"] = _torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                if amsgrad:
                    max_exp_avg_sq = state["max_exp_avg_sq"]
                state["step"] += 1
                bias_correction1 = 1 - beta1 ** state["step"]
                bias_correction2 = 1 - beta2 ** state["step"]
                # Decay the first and second moment running average coefficient
                state["exp_avg"] = exp_avg = (exp_avg * beta1) + (1 - beta1) * g
                state["exp_avg_sq"] = exp_avg_sq = (exp_avg_sq * beta2) + (
                    1 - beta2
                ) * g * g
                # Deal with stability issues
                # NOTE(review): unlike DifferentiableAdam above, no 1e-8 is
                # added to exp_avg_sq after masking — confirm intentional.
                mask = exp_avg_sq == 0.0
                _maybe_mask(exp_avg_sq, mask)
                if amsgrad:
                    # Maintains the max of all 2nd moment running avg. till now
                    state["max_exp_avg_sq"] = max_exp_avg_sq = _torch.max(
                        max_exp_avg_sq, exp_avg_sq
                    )
                    # Use the max. for normalizing running avg. of gradient
                    denom = _add(
                        max_exp_avg_sq.sqrt() / _math.sqrt(bias_correction2),
                        group["eps"],
                    )
                else:
                    denom = _add(
                        exp_avg_sq.sqrt() / _math.sqrt(bias_correction2), group["eps"]
                    )
                step_size = group["lr"] / bias_correction1
                group["params"][p_idx] = _addcdiv(p, -step_size, exp_avg, denom)
class DifferentiableAdadelta(DifferentiableOptimizer):
    r"""A differentiable version of the Adadelta optimizer.
    This optimizer creates a gradient tape as it updates parameters."""
    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        zipped = zip(self.param_groups, grouped_grads)
        for group_idx, (group, grads) in enumerate(zipped):
            rho, eps = group["rho"], group["eps"]
            for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
                if g is None:
                    # Parameter unused in the loss (or frozen); leave as-is.
                    continue
                if g.data.is_sparse:
                    raise RuntimeError("Adadelta does not support sparse gradients")
                state = self.state[group_idx][p_idx]
                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    state["square_avg"] = _torch.zeros_like(p.data)
                    state["acc_delta"] = _torch.zeros_like(p.data)
                square_avg, acc_delta = state["square_avg"], state["acc_delta"]
                state["step"] += 1
                if group["weight_decay"] != 0:
                    g = _add(g, group["weight_decay"], p)
                # Running average of squared grads, updated out-of-place so
                # the step stays on the autograd tape.
                square_avg = _addcmul(square_avg.mul(rho), 1 - rho, g, g)
                state["square_avg"] = square_avg
                # Update = RMS(accumulated deltas) / RMS(squared grads) * g.
                std = _add(square_avg, eps).sqrt()
                delta = _add(acc_delta, eps).sqrt().div(std).mul(g)
                state["acc_delta"] = _addcmul(acc_delta.mul(rho), 1 - rho, delta, delta)
                group["params"][p_idx] = _add(p, -group["lr"], delta)
class DifferentiableAdagrad(DifferentiableOptimizer):
    r"""A differentiable version of the Adagrad optimizer.
    This optimizer creates a gradient tape as it updates parameters."""
    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        zipped = zip(self.param_groups, grouped_grads)
        for group_idx, (group, grads) in enumerate(zipped):
            for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
                if g is None:
                    # Parameter unused in the loss (or frozen); leave as-is.
                    continue
                state = self.state[group_idx][p_idx]
                # NOTE(review): no local state initialization here — this
                # appears to rely on the wrapped torch.optim.Adagrad having
                # initialized "step"/"sum" eagerly (copied in __init__);
                # confirm against the torch version in use.
                state["step"] += 1
                if group["weight_decay"] != 0:
                    if g.data.is_sparse:
                        raise RuntimeError(
                            "weight_decay option is not compatible with sparse "
                            "gradients"
                        )
                    g = _add(g, group["weight_decay"], p)
                # Learning rate decayed with the step count.
                clr = group["lr"] / (1 + (state["step"] - 1) * group["lr_decay"])
                if g.is_sparse:
                    # TODO: implement support for sparse gradients.
                    raise NotImplementedError(
                        "sparse gradient support for DifferentiableAdagrad not "
                        "implemented yet."
                    )
                else:
                    # Accumulate squared gradients out-of-place; mask zero
                    # entries before sqrt for gradient stability.
                    state["sum"] = sum_ = _addcmul(state["sum"], 1, g, g)
                    mask = sum_ == 0.0
                    _maybe_mask(sum_, mask)
                    std = _add(
                        state["sum"].sqrt(), group["eps"] if "eps" in group else 1e-10
                    )
                    group["params"][p_idx] = _addcdiv(p, -clr, g, std)
class DifferentiableAdamax(DifferentiableOptimizer):
    r"""A differentiable version of the Adamax optimizer.
    This optimizer creates a gradient tape as it updates parameters."""
    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        zipped = zip(self.param_groups, grouped_grads)
        for group_idx, (group, grads) in enumerate(zipped):
            for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
                if g is None:
                    # Parameter unused in the loss (or frozen); leave as-is.
                    continue
                if g.is_sparse:
                    raise RuntimeError("Adamax does not support sparse gradients")
                state = self.state[group_idx][p_idx]
                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    state["exp_avg"] = _torch.zeros_like(p.data)
                    state["exp_inf"] = _torch.zeros_like(p.data)
                exp_avg, exp_inf = state["exp_avg"], state["exp_inf"]
                beta1, beta2 = group["betas"]
                eps = group["eps"]
                state["step"] += 1
                if group["weight_decay"] != 0:
                    g = _add(g, group["weight_decay"], p)
                # Update biased first moment estimate
                state["exp_avg"] = exp_avg = _add(exp_avg.mul(beta1), 1 - beta1, g)
                # Update the exponentially weighted infinity norm.
                # The elementwise max is taken by stacking the decayed norm and
                # |g|+eps along a new leading dim and reducing over it.
                state["exp_inf"] = exp_inf = exp_inf.mul(beta2).unsqueeze(0)
                norm_buf = _torch.cat([exp_inf, _add(g.abs(), eps).unsqueeze(0)], 0)
                exp_inf, _ = _torch.max(norm_buf, 0, keepdim=False)
                state["exp_inf"] = exp_inf
                bias_correction = 1 - beta1 ** state["step"]
                clr = group["lr"] / bias_correction
                group["params"][p_idx] = _addcdiv(p, -clr, exp_avg, exp_inf)
class DifferentiableASGD(DifferentiableOptimizer):
    r"""A differentiable version of the ASGD optimizer.
    This optimizer creates a gradient tape as it updates parameters."""
    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        zipped = zip(self.param_groups, grouped_grads)
        for group_idx, (group, grads) in enumerate(zipped):
            for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
                if g is None:
                    # Parameter unused in the loss (or frozen); leave as-is.
                    continue
                if g.is_sparse:
                    raise RuntimeError("ASGD does not support sparse gradients")
                state = self.state[group_idx][p_idx]
                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    state["eta"] = group["lr"]
                    state["mu"] = 1
                    state["ax"] = _torch.zeros_like(p.data)
                state["step"] += 1
                if group["weight_decay"] != 0:
                    g = _add(g, group["weight_decay"], p)
                # decay term
                p = p.mul(1 - group["lambd"] * state["eta"])
                # update parameter
                group["params"][p_idx] = _add(p, -state["eta"], g)
                # averaging
                if state["mu"] != 1:
                    state["ax"] = _add(state["ax"], p.sub(state["ax"]).mul(state["mu"]))
                else:
                    state["ax"] = p
                # update eta and mu
                # (the learning rate anneals and averaging kicks in after t0
                # steps, per the ASGD schedule)
                state["eta"] = group["lr"] / _math.pow(
                    (1 + group["lambd"] * group["lr"] * state["step"]), group["alpha"]
                )
                state["mu"] = 1 / max(1, state["step"] - group["t0"])
class DifferentiableRMSprop(DifferentiableOptimizer):
    r"""A differentiable version of the RMSprop optimizer.
    This optimizer creates a gradient tape as it updates parameters."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Warn eagerly at construction: see the NB comment in _update below.
        _warnings.warn(
            "Differentiable RMSprop suffers from gradient correctness issues. "
            "Consider using another optimizer until we fix these..."
        )
    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        zipped = zip(self.param_groups, grouped_grads)
        for group_idx, (group, grads) in enumerate(zipped):
            for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
                if g is None:
                    # Parameter unused in the loss (or frozen); leave as-is.
                    continue
                if g.is_sparse:
                    raise RuntimeError("RMSprop does not support sparse gradients")
                state = self.state[group_idx][p_idx]
                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    state["square_avg"] = _torch.zeros_like(p.data)
                    if group["momentum"] > 0:
                        state["momentum_buffer"] = _torch.zeros_like(p.data)
                    if group["centered"]:
                        state["grad_avg"] = _torch.zeros_like(p.data)
                square_avg = state["square_avg"]
                alpha = group["alpha"]
                state["step"] += 1
                if group["weight_decay"] != 0:
                    g = _add(g, group["weight_decay"], p)
                square_avg = _addcmul(square_avg.mul(alpha), 1 - alpha, g, g)
                state["square_avg"] = square_avg
                # NB: This prevents nans but is not sufficient to recover
                # correct gradients.
                mask = square_avg == 0.0
                _maybe_mask(square_avg, mask)
                if group["centered"]:
                    # Centered variant: normalize by the gradient's estimated
                    # variance rather than its raw second moment.
                    grad_avg = state["grad_avg"]
                    grad_avg = _add(grad_avg.mul(alpha), 1 - alpha, g)
                    state["grad_avg"] = grad_avg
                    eps = group["eps"]
                    avg = _add(_addcmul(square_avg, -1, grad_avg, grad_avg).sqrt(), eps)
                else:
                    avg = _add(square_avg.sqrt(), group["eps"])
                if group["momentum"] > 0:
                    buf = state["momentum_buffer"]
                    buf = _addcdiv(buf.mul(group["momentum"]), g, avg)
                    state["momentum_buffer"] = buf
                    p = _add(p, -group["lr"], buf)
                else:
                    p = _addcdiv(p, -group["lr"], g, avg)
                group["params"][p_idx] = p
class DifferentiableRprop(DifferentiableOptimizer):
    r"""A differentiable version of the Rprop optimizer.
    This optimizer creates a gradient tape as it updates parameters."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        _warnings.warn(
            "Differentiable Rprop (correctly) yields zero second order "
            "gradients, as only the sign of the gradient is used in updates. "
            "Future versions will offer higher order gradients based on a "
            "continuous relaxation of the forward pass."
        )
    def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
        zipped = zip(self.param_groups, grouped_grads)
        for group_idx, (group, grads) in enumerate(zipped):
            for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
                if g is None:
                    # Parameter unused in the loss (or frozen); leave as-is.
                    continue
                if g.is_sparse:
                    raise RuntimeError("Rprop does not support sparse gradients")
                state = self.state[group_idx][p_idx]
                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    state["prev"] = _torch.zeros_like(p.data)
                    # Per-element step sizes start at the learning rate.
                    state["step_size"] = g.new().resize_as_(g).fill_(group["lr"])
                etaminus, etaplus = group["etas"]
                step_size_min, step_size_max = group["step_sizes"]
                step_size = state["step_size"]
                state["step"] += 1
                # sign > 0 where the gradient kept its sign since last step,
                # < 0 where it flipped, == 0 where either grad was zero.
                sign = g.mul(state["prev"]).sign()
                sign[sign.gt(0)] = etaplus
                sign[sign.lt(0)] = etaminus
                sign[sign.eq(0)] = 1
                # update stepsizes with step size updates
                step_size = step_size.mul(sign).clamp(step_size_min, step_size_max)
                state["step_size"] = step_size
                # for dir<0, dfdx=0
                # for dir>=0 dfdx=dfdx
                g = _torch.where(sign.eq(etaminus), _torch.zeros_like(g), g)
                # update parameters
                group["params"][p_idx] = _addcmul(p, -1, g.sign(), step_size)
                state["prev"] = g.clone()
# Maps a supported ``torch.optim`` optimizer *class* to its differentiable
# counterpart. The keys are classes, not instances (the lookup below is
# ``type(opt) in _opt_mapping``), hence ``Type[...]`` on both sides of the
# alias — previously the key side was (incorrectly) typed as an instance.
_OptMappingType = _typing.Dict[
    _typing.Type[_torch.optim.Optimizer], _typing.Type[DifferentiableOptimizer]
]
_opt_mapping: _OptMappingType = {
    _torch.optim.Adadelta: DifferentiableAdadelta,
    _torch.optim.Adagrad: DifferentiableAdagrad,
    _torch.optim.Adam: DifferentiableAdam,
    _torch.optim.AdamW: DifferentiableAdamW,
    _torch.optim.Adamax: DifferentiableAdamax,
    _torch.optim.ASGD: DifferentiableASGD,
    _torch.optim.RMSprop: DifferentiableRMSprop,
    _torch.optim.Rprop: DifferentiableRprop,
    _torch.optim.SGD: DifferentiableSGD,
}
def get_diff_optim(
    opt: _torch.optim.Optimizer,
    reference_params: _typing.Iterable[_torch.Tensor],
    fmodel: _typing.Optional[_patch._MonkeyPatchBase] = None,
    device: _typing.Optional[_torch.device] = None,
    override: _typing.Optional[_OverrideType] = None,
    track_higher_grads: bool = True,
    **kwargs,
) -> DifferentiableOptimizer:
    r"""Construct a differentiable optimizer mirroring an existing one.

    Args:
        opt: an existing optimizer of a supported type — one defined in
            ``torch.optim`` or registered at runtime with
            ``higher.register_optim`` — assumed to track the parameters (or
            a subset thereof) of a single ``torch.nn.Module``, with support
            for parameter groups.
        reference_params: the parameters tracked by ``opt``, as returned by
            ``module.parameters()``.
        fmodel (optional): a patched version of the module tracked by
            ``opt``, holding a view on its latest fast weights. If given,
            the fast weights need not be passed explicitly to the
            differentiable optimizer's ``step`` via ``params``; otherwise
            they must be.
        device (optional): device to cast the copied optimizer state to. If
            omitted, state tensors keep the device of the parameters tracked
            by ``opt``.
        override (optional): maps optimizer setting names to either a
            singleton list (applied to every parameter group) or a list with
            one entry per parameter group. Entries may be tensors requiring
            gradient, allowing the settings themselves to be differentiated.
        track_higher_grads: if True, the unrolled optimization graph is
            retained and the fast weights bear grad funcs, permitting
            backpropagation through the optimization process. If False, the
            optimizer runs in "test mode" (e.g. the training loop at test
            time in k-shot experiments) without the associated memory
            overhead.

    Returns:
        An initialized ``DifferentiableOptimizer`` of the matching subtype.

    Raises:
        ValueError: if ``type(opt)`` has no registered differentiable
            counterpart.
    """
    diff_opt_type = _opt_mapping.get(type(opt))
    if diff_opt_type is None:
        raise ValueError(
            "Optimizer type {} not supported by higher yet.".format(type(opt))
        )
    return diff_opt_type(
        opt,
        reference_params,
        fmodel=fmodel,
        device=device,
        override=override,
        track_higher_grads=track_higher_grads,
        **kwargs,
    )
def create_diff_optim(
    opt_type: _typing.Type[_torch.optim.Optimizer],
    opt_kwargs: _typing.Optional[_typing.Dict[str, _typing.Any]] = None,
    params: _typing.Optional[_typing.List[_torch.Tensor]] = None,
    fmodel: _typing.Optional[_patch._MonkeyPatchBase] = None,
    device: _typing.Optional[_torch.device] = None,
    override: _typing.Optional[_OverrideType] = None,
    track_higher_grads: bool = True,
    **kwargs,
) -> DifferentiableOptimizer:
    r"""Construct a differentiable version of a *new* optimizer.

    A throwaway instance of ``opt_type`` is created over zero-filled dummy
    tensors (used purely for shape inference) and then wrapped.

    Args:
        opt_type: the constructor of a supported ``torch.optim.Optimizer``
            subtype (built-in or registered at runtime).
        opt_kwargs: keyword arguments forwarded to the optimizer constructor.
        params (optional): the (fast) weights the differentiable optimizer
            will update; required when ``fmodel`` is absent, and preferred
            over ``fmodel`` when both are given. May also be parameter-group
            style: an iterable of dicts carrying a 'params' key plus
            group-specific hyperparameters.
        fmodel (optional): a patched module holding a view on its latest
            fast weights; used for shape inference when ``params`` is absent
            and passed through to the differentiable optimizer.
        device (optional): device to cast optimizer state to.
        override (optional): per-group (or broadcast singleton) setting
            overrides, as for ``get_diff_optim``.
        track_higher_grads: if True, retain the unrolled optimization graph;
            if False, run in memory-light "test mode".

    Returns:
        An initialized ``DifferentiableOptimizer`` of the matching subtype.

    Raises:
        ValueError: if ``opt_type`` is unsupported, or neither ``params``
            nor ``fmodel`` is provided.
    """
    if opt_type not in _opt_mapping:
        raise ValueError(
            "Optimizer type {} not supported by higher yet.".format(opt_type)
        )
    if params is not None:
        params = list(params)
        if isinstance(params[0], dict):
            # Parameter-group format: mirror each group, replacing the real
            # weights under "params" with zero dummies for shape inference.
            dummy = []
            for group in params:
                dummy_group = {}
                for key, value in group.items():
                    if key == "params":
                        dummy_group[key] = _torch.zeros_like(value, requires_grad=True)
                    else:
                        dummy_group[key] = value
                dummy.append(dummy_group)
        else:
            dummy = [_torch.zeros_like(p, requires_grad=True) for p in params]
    elif fmodel is not None:
        dummy = [
            _torch.zeros_like(p, requires_grad=True) for p in fmodel.parameters()
        ]
    else:
        raise ValueError("Must specify one of fmodel or params in kwargs.")
    opt_kwargs = {} if opt_kwargs is None else opt_kwargs
    opt = opt_type(dummy, **opt_kwargs)
    return _opt_mapping[opt_type](
        opt,
        dummy,
        fmodel=fmodel,
        device=device,
        override=override,
        track_higher_grads=track_higher_grads,
        **kwargs,
    )
def register_optim(
    optim_type: _typing.Type[_torch.optim.Optimizer],
    diff_optim_type: _typing.Type[DifferentiableOptimizer],
) -> None:
    r"""Registers a new optimizer type for use with higher functions.

    Args:
        optim_type: the type of a new optimizer, assumed to be a subclass of
            ``torch.optim.Optimizer``. (The annotation previously declared an
            *instance*; the mapping is keyed by classes — see
            ``type(opt) in _opt_mapping`` in ``get_diff_optim``.)
        diff_optim_type: the type of a new differentiable optimizer, assumed
            to be a subclass of ``higher.optim.DifferentiableOptimizer`` with
            functionally equivalent logic to ``optim_type``.
    """
    _opt_mapping[optim_type] = diff_optim_type
def get_trainable_opt_params(
    opt: _torch.optim.Optimizer, device: _typing.Optional[_torch.device] = None
) -> _OverrideType:
    r"""Build an override dictionary of trainable hyperparameters from ``opt``.

    Args:
        opt: the optimizer to read hyperparameters from.
        device (optional): the device to place the learnable tensors on.

    Returns:
        A dict in the format expected by differentiable optimizers' override
        kwarg: for each hyperparameter that is a float/int (or a structure
        containing only floats/ints), one trainable tensor copy per parameter
        group. Hyperparameters mixing differentiable and non-differentiable
        leaf types are skipped and must be overridden manually.
    """
    def as_trainable(x: _typing.Union[_torch.Tensor, int, float]) -> _torch.Tensor:
        # Tensors are cloned and detached; plain numbers become fresh
        # scalar tensors — all requiring grad.
        if isinstance(x, _torch.Tensor):
            return x.clone().detach().requires_grad_()
        return _torch.tensor(float(x), device=device, requires_grad=True)

    override: _OverrideType = _collections.defaultdict(list)
    for group in opt.param_groups:
        for name, value in group.items():
            if name == "params":
                # Skip the actual model parameters tracked by the optimizer.
                continue
            # Only hyperparameters whose leaves are all ints/floats are
            # eligible to become trainable tensors.
            leaves = _utils.flatten(value)
            if all(isinstance(leaf, (int, float)) for leaf in leaves):
                override[name].append(_utils._recursive_map(value, as_trainable))
    return override
def apply_trainable_opt_params(
    opt: _torch.optim.Optimizer, override: _OverrideType
) -> None:
    r"""Write learned hyperparameter values back into the original optimizer.

    Args:
        opt: the original optimizer; hyperparameters in its parameter groups
            are modified in place.
        override: dict in the format used for the ``override`` kwarg of
            differentiable optimizers.
    """
    n_groups = len(opt.param_groups)
    for name, values in override.items():
        # Each hyperparameter needs either a single shared override or
        # exactly one override per parameter group.
        if len(values) not in (1, n_groups):
            raise ValueError(
                "Mismatch between the number of override tensors for "
                "optimizer parameter {} and the number of "
                "parameter groups.".format(name)
            )
        for idx, group in enumerate(opt.param_groups):
            chosen = values[0] if len(values) == 1 else values[idx]
            group[name] = _recursive_apply(chosen, group[name])
## Local utility functions
# TODO(egrefen): use funcs below instead of x._add, in diffopt
def _add(
tensor: _torch.Tensor,
a1: _typing.Union[float, int, _torch.Tensor],
a2: _typing.Optional[_torch.Tensor] = None,
) -> _torch.Tensor:
if a2 is None:
value: _typing.Union[_torch.Tensor, float] = 1.0
other = a1
else:
value = a1
other = a2
return tensor + (value * other)
def _addcdiv(
tensor: _torch.Tensor,
a1: _typing.Union[float, int, _torch.Tensor],
a2: _torch.Tensor,
a3: _typing.Optional[_torch.Tensor] = None,
) -> _torch.Tensor:
if a3 is None:
value: _typing.Union[_torch.Tensor, float] = 1.0
tensor1 = a1
tensor2 = a2
else:
value = a1
tensor1 = a2
tensor2 = a3
return tensor + value * (tensor1 / tensor2)
def _addcmul(
tensor: _torch.Tensor,
a1: _typing.Union[float, int, _torch.Tensor],
a2: _torch.Tensor,
a3: _typing.Optional[_torch.Tensor] = None,
) -> _torch.Tensor:
if a3 is None:
value: _typing.Union[_torch.Tensor, float] = 1.0
tensor1 = a1
tensor2 = a2
else:
value = a1
tensor1 = a2
tensor2 = a3
return tensor + (value * tensor1 * tensor2)
# TODO(egrefen): this probably could be refactored into utils
def _recursive_apply(
    replacement: _typing.Union[list, tuple, dict, set, _torch.Tensor],
    target: _typing.Union[_torch.Tensor, int, float],
) -> _typing.Union[_torch.Tensor, int, float]:
    """Recursively copy values from ``replacement`` into ``target``.

    Walks the nested structure of ``target`` (list/tuple/dict/set/tensor or
    scalar leaf). Tensor targets are mutated in place via ``.data`` so that
    existing references remain valid; scalar targets are rebuilt from the
    replacement tensor's value.

    Raises:
        ValueError: if the two structures' types cannot be reconciled.
    """
    if not isinstance(replacement, type(target)):
        # A type mismatch is only tolerated when a tensor replaces a scalar
        # leaf (e.g. a learned tensor overriding a float hyperparameter).
        if isinstance(replacement, _torch.Tensor) and not _utils._is_container(target):
            return type(target)(replacement.item())
        raise ValueError(
            "Expected an non-container type for target, but got {} with value "
            "{}".format(type(target), target)
        )
    elif isinstance(replacement, _torch.Tensor) and isinstance(target, _torch.Tensor):
        # Move onto the target's device, then overwrite its storage in place.
        replacement = replacement.to(target.device)
        target.data = replacement.data
        return target
    if isinstance(target, list):
        return type(target)(
            [_recursive_apply(r, t) for r, t in zip(replacement, target)]
        )
    elif isinstance(target, tuple):
        return type(target)(
            [_recursive_apply(r, t) for r, t in zip(replacement, target)]
        )
    elif isinstance(replacement, dict) and isinstance(target, dict):
        # NOTE(review): entries are paired by iteration order, not by key —
        # assumes both dicts share the same insertion order; confirm for
        # non-standard dict subclasses.
        return type(target)(
            {
                k: _recursive_apply(r, t)
                for (_, r), (k, t) in zip(replacement.items(), target.items())
            }
        )
    elif isinstance(target, set):
        # NOTE(review): set iteration order is arbitrary, so this pairing is
        # not deterministic — verify sets actually reach this code path.
        return type(target)(
            {_recursive_apply(r, t) for r, t in zip(replacement, target)}
        )
    else:
        raise ValueError(
            "Couldn't apply replacement of type {} to target of type "
            "{}".format(type(replacement), type(target))
        )
|
Blackbox-Coresets-VI-main
|
psvi/robust_higher/optim.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
# The script to randomly split a dataset for hyperparameter tunning
import os
from absl import app
from absl import flags
import pdb
from datasets import load_dataset, concatenate_datasets
# Command-line flags, parsed by absl when app.run(main) executes.
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input directory that contains train.tsv and test.tsv .")
flags.DEFINE_string("dataset", "", "Input dataset name. Output will be stored at dataset_hp")
def main(unused_argv):
    """Merge train.tsv/test.tsv, re-split 90:10, and write the new splits.

    Reads ``FLAGS.input``/{train,test}.tsv, concatenates them, performs a
    deterministic (seed=42) shuffled 90:10 split, and writes tab-separated
    train.csv / test.csv under data/<FLAGS.dataset>_hp/.
    """
    # Concatenate train and test file
    data_files = {
        "train": FLAGS.input + '/train.tsv',
        "test": FLAGS.input + '/test.tsv',
    }
    raw_datasets = load_dataset("csv", data_files=data_files, sep='\t', column_names=["input", "output"])
    concat_data = concatenate_datasets([raw_datasets["train"], raw_datasets["test"]])
    # Split the dataset by 90:10 train test ratio (fixed seed => reproducible)
    splitted = concat_data.train_test_split(test_size=0.1, shuffle=True, seed=42)
    # exist_ok avoids the check-then-create race of exists() + makedirs()
    out_dir = 'data/' + FLAGS.dataset + '_hp'
    os.makedirs(out_dir, exist_ok=True)
    # Output the corresponding splits to target directory
    splitted["train"].to_csv(out_dir + '/train.csv', sep="\t", index=False)
    splitted["test"].to_csv(out_dir + '/test.csv', sep="\t", index=False)


if __name__ == "__main__":
    app.run(main)
|
CompGenRep_MLRC2022-main
|
split_dataset_for_hp.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
# This file include utility functions to compute stats given a dataset
import re
import os
import csv
import pdb
from prettytable import PrettyTable
from torchaudio.functional import edit_distance
from transformers import AutoTokenizer
from utils.helper_utils.helper_methods import load_dataset_with_name, list_datasets_and_their_splits
def build_table_for_all_datasets(data_type, sub_datatype=None, model_name='facebook/bart-base'):
    """
    Build a csv table for all data & splits
    Input:
        Data_type in {num_instances, raw_avg_length, tok_seq_length, lexical_overlap}
    """
    base_dir = os.getenv('BASE_DIR')
    # Table layout: one row per (dataset, split), one column per optim split
    tab = PrettyTable(header=True)
    optim_splits = ['train', 'validation', 'test', 'gen', 'Overall']
    tab.field_names = ['Dataset', 'Split'] + optim_splits
    dataset_names, splits_mapping = list_datasets_and_their_splits(base_dir + '/baseline_replication/TMCD/data')
    for ds_name in dataset_names:
        for ds_split in splits_mapping[ds_name]:
            # Compute the requested statistic for this dataset/split pair
            if data_type == 'num_instances':
                stats, _ = number_of_instances(ds_name, ds_split)
            elif data_type == 'raw_avg_length':
                in_avg, out_avg, _ = compute_avg_length(ds_name, ds_split)
                stats = in_avg if sub_datatype == 'input' else out_avg
            elif data_type == 'tok_seq_length':
                in_avg, out_avg, _ = compute_avg_tokenized_length_hf(ds_name, ds_split, target_model_name=model_name)
                stats = in_avg if sub_datatype == 'input' else out_avg
            elif data_type == 'lexical_overlap':
                stats, _ = compute_lexical_overlap(ds_name, ds_split)
            else:
                raise ValueError('The data_type can only be {num_instances, raw_avg_length, tok_seq_length, lexical_overlap}.')
            # Fill one cell per optimization split
            row = [ds_name, ds_split]
            for optim_split in optim_splits:
                if optim_split in stats:
                    row.append(stats[optim_split])
                elif optim_split == 'Overall' and 'avg' in stats:
                    # For seq length, the overall equiv to avg
                    row.append(stats['avg'])
                else:
                    row.append('-')
            tab.add_row(row)
    if not os.path.exists(base_dir + '/results/analysis_res/'):
        os.makedirs(base_dir + '/results/analysis_res/')
    # Construct CSV filename: data_type[_subtype][_model].csv
    file_name = data_type
    if sub_datatype:
        file_name += '_' + sub_datatype
    if data_type == 'tok_seq_length':
        # split('/')[-1] is a no-op when the model name contains no slash
        file_name += '_' + model_name.split('/')[-1]
    with open(base_dir + '/results/analysis_res/' + file_name + '.csv', 'w', newline='') as f:
        f.write(tab.get_csv_string())
    print(tab)
def number_of_instances():
    """
    Print and save a per-dataset/per-split instance-count table for all
    datasets, writing it to results/analysis_res/num_instances.csv.

    NOTE(review): this zero-argument version is shadowed by the
    two-argument ``number_of_instances(dataset_name, split)`` defined later
    in this module, so it is effectively dead code.
    """
    base_dir = os.getenv('BASE_DIR')
    # Construct table with dataset stats
    tab = PrettyTable()
    optim_splits = ['train', 'validation', 'test', 'gen', 'Overall']
    tab.add_row(['Dataset', 'Split'] + optim_splits)
    dataset_names, splits_mapping = list_datasets_and_their_splits(base_dir + '/baseline_replication/TMCD/data')
    for dataset_name in dataset_names:
        for split in splits_mapping[dataset_name]:
            curr_row = []
            # Load the dataset
            dataset = load_dataset_with_name(dataset_name, split)
            curr_row.append(dataset_name)
            curr_row.append(split)
            for optim_split in optim_splits:
                if optim_split in dataset:
                    curr_row.append(len(dataset[optim_split]))
                else:
                    # Placeholder for splits absent from this dataset; the
                    # 'Overall' slot also lands here and is overwritten below.
                    curr_row.append(0.0)
            # Add up the instance count for overall (replaces the placeholder)
            curr_row[-1] = sum(curr_row[2:])
            tab.add_row(curr_row)
    if not os.path.exists(base_dir + '/results/analysis_res/'):
        os.makedirs(base_dir + '/results/analysis_res/')
    with open(base_dir + '/results/analysis_res/num_instances.csv', 'w', newline='') as f:
        f.write(tab.get_csv_string())
    print(tab)
def number_of_instances(dataset_name, split):
    """
    Count the instances of one dataset/split per optimization split.

    Outputs:
        num_instances (dict): number of instance in each optimization split
            (keys are the split names) plus an 'Overall' total.
        tab (PrettyTable): the same counts formatted for display.
    """
    tab = PrettyTable()
    # Load the dataset
    dataset = load_dataset_with_name(dataset_name, split)
    split_names = []
    counts = []
    for optim_split in dataset:
        split_names.append(optim_split)
        counts.append(len(dataset[optim_split]))
    total = sum(counts)
    num_instances = dict(zip(split_names, counts))
    num_instances['Overall'] = total
    tab.add_column('Split', split_names + ['Overall'])
    tab.add_column('Number of Instances', counts + [total])
    return num_instances, tab
def compute_avg_length(dataset_name, split):
    """
    Computes the average number of words of input and output
    Outputs:
        input_avg_len (dict): avg number of words in input, keys are train/test/dev
        output_avg_len (dict): avg number of words in output, keys are train/test/dev
        tab (PrettyTable): the table with a display of dataset stat
    """
    # TODO: Maybe plot the distribution of length, too?
    dataset = load_dataset_with_name(dataset_name, split)
    tab = PrettyTable()
    input_avg_len = dict()
    output_avg_len = dict()
    split_names = []
    dataset_lens = []
    input_lens_column = []
    output_lens_column = []
    overall_input_len = 0
    overall_output_len = 0
    for ft_split in dataset:
        n_instances = len(dataset[ft_split])
        split_names.append(ft_split)
        dataset_lens.append(n_instances)
        # Word counts via the \w+ regex
        split_input_words = 0
        split_output_words = 0
        for instance in dataset[ft_split]:
            split_input_words += len(re.findall(r'\w+', instance['input']))
            split_output_words += len(re.findall(r'\w+', instance['output']))
        input_avg_len[ft_split] = split_input_words / n_instances
        output_avg_len[ft_split] = split_output_words / n_instances
        input_lens_column.append(input_avg_len[ft_split])
        output_lens_column.append(output_avg_len[ft_split])
        overall_input_len += split_input_words
        overall_output_len += split_output_words
    # Overall row: average across every split combined
    total_instances = sum(dataset_lens)
    input_lens_column.append(overall_input_len / total_instances)
    output_lens_column.append(overall_output_len / total_instances)
    input_avg_len['avg'] = input_lens_column[-1]
    output_avg_len['avg'] = output_lens_column[-1]
    tab.add_column('Split', split_names + ['Overall'])
    tab.add_column('Number of Instances', dataset_lens + [0])
    tab.add_column('Avg input length', input_lens_column)
    tab.add_column('Avg output length', output_lens_column)
    return input_avg_len, output_avg_len, tab
def compute_lexical_overlap(dataset_name, split):
    """
    Computes the average lexical overlap (Levenshtein distance / input_len) between input and output
    Outputs:
        avg_overlap (dict): avg lexical overlap between input and output, keys are train/test/dev
        tab (PrettyTable): the per-split averages formatted for display
    """
    dataset = load_dataset_with_name(dataset_name, split)
    tab = PrettyTable()
    avg_overlap = dict()
    split_names = []
    dataset_lens = []
    overlap_column = []
    overall_overlap = 0.0
    for ft_split in dataset:
        n_instances = len(dataset[ft_split])
        split_names.append(ft_split)
        dataset_lens.append(n_instances)
        # Edit distance normalized by input length, summed over the split
        split_overlap = sum(
            edit_distance(instance['input'], instance['output']) / len(instance['input'])
            for instance in dataset[ft_split]
        )
        avg_overlap[ft_split] = split_overlap / n_instances
        overlap_column.append(avg_overlap[ft_split])
        overall_overlap += split_overlap
    # Overall row: average across every split combined
    overlap_column.append(overall_overlap / sum(dataset_lens))
    avg_overlap['avg'] = overlap_column[-1]
    tab.add_column('Split', split_names + ['Overall'])
    tab.add_column('Number of Instaces', dataset_lens + [0])
    tab.add_column('Avg Lev(input, output) / input_len', overlap_column)
    return avg_overlap, tab
def compute_avg_tokenized_length_hf(dataset_name, split, target_model_name, max_seq_length=512, max_output_length=512):
    """
    Computes the average number of tokens of input and output after tokenization
    Inputs:
        dataset_name={COGS, geoquery, spider, SCAN}
        target_model_name=model name from Huggingface that has a tokenizer or a path
        max_seq_length: truncation limit for input sequences
        max_output_length: truncation limit for output sequences
    Outputs:
        input_avg_len (dict): avg number of tokens in input, keys are train/test/dev
        output_avg_len (dict): avg number of tokens in output, keys are train/test/dev
        tab (PrettyTable): the table with a display of dataset stat
    """
    # Construct table with dataset stats
    tab = PrettyTable()
    input_avg_len = dict()
    output_avg_len = dict()
    # Load the dataset
    dataset = load_dataset_with_name(dataset_name, split)
    # Load the tokenizer
    tokenizer = AutoTokenizer.from_pretrained(target_model_name, use_fast=True)
    # Loop through the split
    split_names = []
    dataset_lens = []
    input_lens_column = []
    output_lens_column = []
    overall_input_len = 0
    overall_output_len = 0
    for optim_split in dataset:
        # Tokenize; T5-style models expect a task prefix on the input
        inputs = dataset[optim_split]['input']
        if 't5' in target_model_name:
            inputs = ['semanticparse: ' + x for x in inputs]
        model_inputs = tokenizer(inputs, max_length=max_seq_length, truncation=True)
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(dataset[optim_split]['output'], max_length=max_output_length, truncation=True)
        # Compute the length
        split_names.append(optim_split)
        dataset_lens.append(len(dataset[optim_split]))
        tot_input_len = 0
        tot_output_len = 0
        for input_tok, output_tok in zip(model_inputs['input_ids'], labels['input_ids']):
            tot_input_len += len(input_tok)
            # BUG FIX: this previously accumulated len(input_tok), so the
            # reported output lengths were actually input lengths.
            tot_output_len += len(output_tok)
        input_avg_len[optim_split] = tot_input_len / len(dataset[optim_split])
        output_avg_len[optim_split] = tot_output_len / len(dataset[optim_split])
        input_lens_column.append(input_avg_len[optim_split])
        output_lens_column.append(output_avg_len[optim_split])
        overall_input_len += tot_input_len
        overall_output_len += tot_output_len
    # Add the averaged length to table data for display
    input_lens_column.append(overall_input_len / sum(dataset_lens))
    output_lens_column.append(overall_output_len / sum(dataset_lens))
    input_avg_len['avg'] = input_lens_column[-1]
    output_avg_len['avg'] = output_lens_column[-1]
    tab.add_column('Split', split_names + ['Overall'])
    tab.add_column('Number of Instances', dataset_lens + [0])
    tab.add_column('Avg input length', input_lens_column)
    tab.add_column('Avg output length', output_lens_column)
    return input_avg_len, output_avg_len, tab
|
CompGenRep_MLRC2022-main
|
utils/dataset_stat.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
import os
# Repository root, taken from the BASE_DIR environment variable.
# NOTE(review): os.environ.get returns None when BASE_DIR is unset, which
# makes the os.path.join calls below raise TypeError — consider failing
# fast with os.environ['BASE_DIR'] instead.
BASE_DIR = os.environ.get('BASE_DIR')
# Trained-model and data directories, for this project and for the TMCD
# baseline replication respectively.
MODEL_DIR = os.path.join(BASE_DIR, 'trained_models/')
TMCD_MODEL_DIR = os.path.join(BASE_DIR, 'baseline_replication/TMCD/trained_models/')
DATA_DIR = os.path.join(BASE_DIR, 'data/')
TMCD_DATA_DIR = os.path.join(BASE_DIR, 'baseline_replication/TMCD/data/')
# Datasets whose artifacts live under the TMCD baseline directories.
TMCD_DATASETS = {'SCAN', 'geoquery', 'spider'}
|
CompGenRep_MLRC2022-main
|
utils/constants.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
import os
import json
from constants import TMCD_DATASETS, TMCD_MODEL_DIR, MODEL_DIR
def load_training_curve_info(model_name, dataset, split, checkpoint=None):
    """
    Load the eval exact-match history from a trained model's trainer_state.json.

    Args:
        model_name: model directory name prefix.
        dataset: dataset name; TMCD datasets are looked up under TMCD_MODEL_DIR.
        split: split identifier used in the model directory name.
        checkpoint (optional): checkpoint number (as a string) to load instead
            of the final model state.

    Returns steps [list], ems [list], best_em float
    """
    ems = []
    steps = []
    # Find the path to the model
    if dataset in TMCD_DATASETS:
        # Load the model in TMCD data dir
        path = os.path.join(TMCD_MODEL_DIR, dataset, model_name + '_' + split + '_1e-4')
    else:
        path = os.path.join(MODEL_DIR, dataset, model_name + '_' + split + '_1e-4')
    if checkpoint is not None:
        path = os.path.join(path, 'checkpoint-' + checkpoint)
    # Load the model's trainer_state; "with" closes the file handle
    # (the previous json.load(open(...)) leaked it).
    with open(path + '/trainer_state.json') as f:
        trainer_state = json.load(f)
    for metrics in trainer_state['log_history']:
        if 'eval_exact_match' in metrics:
            ems.append(metrics['eval_exact_match'])
            steps.append(metrics['step'])
    # default=0.0 preserves the old behavior when no eval entries exist
    best_em = max(ems, default=0.0)
    return steps, ems, best_em
|
CompGenRep_MLRC2022-main
|
utils/analysis_utils.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.