python_code | repo_name | file_path
---|---|---|
import argparse
import json
from utils.file_utils import TsvIO
from utils.file_utils import read_lines
import pandas as pd
def jsonl_to_tsv(jsonl_file, output_file, sep):
records = [json.loads(line) for line in read_lines(jsonl_file)]
if sep == "\t":
TsvIO.write(records, filename=output_file, schema=records[0].keys(), sep=sep)
else:
df = pd.DataFrame(records)
df.to_csv(output_file, index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        prog='jsonl_to_tsv.py',
        usage='%(prog)s --jsonl_file FILE --output_file FILE [--sep SEP]',
        description='Convert a JSONL file into a TSV (or CSV) file'
)
parser.add_argument('--jsonl_file', type=str,
                        help='Input JSONL file to convert',
                        default=None)
parser.add_argument('--output_file', type=str,
help='Location of output file')
parser.add_argument('--sep', type=str,
                        help='Field separator for the output file',
default='\t')
args = parser.parse_args()
    # Print the parsed arguments and run the JSONL-to-TSV conversion.
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=True))
print("=======================")
jsonl_to_tsv(args.jsonl_file, args.output_file, args.sep)
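    # Illustrative invocation (not part of the original script; the file names are
    # hypothetical placeholders):
    #
    #   python utils/jsonl_to_tsv.py --jsonl_file records.jsonl --output_file records.tsv
    #
    # With the default sep='\t' the TsvIO path is used and the schema is taken from the
    # keys of the first JSON record; passing --sep , writes a CSV via pandas instead.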
| abductive-commonsense-reasoning-master | utils/jsonl_to_tsv.py |
| abductive-commonsense-reasoning-master | utils/__init__.py |
from typing import List
import json
import gzip
import csv
def write_items(items: List[str], output_file):
with open(output_file, 'w') as f:
for item in items:
f.write(str(item) + "\n")
f.close()
def read_lines(input_file: str) -> List[str]:
lines = []
with open(input_file, "rb") as f:
for l in f:
lines.append(l.decode().strip())
return lines
def read_jsonl_lines(input_file: str) -> List[dict]:
with open(input_file) as f:
lines = f.readlines()
return [json.loads(l.strip()) for l in lines]
class TsvIO(object):
@staticmethod
def read(filename, known_schema=None, sep="\t", gzipped=False, source=None):
"""
Read a TSV file with schema in the first line.
:param filename: TSV formatted file
:param first_line_schema: True if the first line is known to contain the schema of the
tsv file. False by default.
:param sep: Separator used in the file. Default is '\t`
:return: A list of data records where each record is a dict. The keys of the dict
correspond to the column name defined in the schema.
"""
first = True
if gzipped:
fn = gzip.open
else:
fn = open
line_num = 0
with fn(filename, 'rt') as f:
for line in f:
if first and known_schema is None:
first = False
known_schema = line.split(sep)
known_schema = [s.strip() for s in known_schema]
else:
line_num += 1
data_fields = line.split(sep)
data = {k.strip(): v.strip() for k, v in zip(known_schema, data_fields)}
data['source'] = filename if source is None else source
data['line_num'] = line_num
yield data
f.close()
@staticmethod
def make_str(item, sub_sep="\t"):
if isinstance(item, list):
return sub_sep.join([TsvIO.make_str(i) for i in item])
else:
return str(item)
@staticmethod
def write(records: List[dict], filename, schema=None, sep='\t', append=False, sub_sep=';'):
"""
Write a TSV formatted file with the provided schema
:param records: List of records to be written to the file
populated
:param filename: Output filename
:param schema: Order in which fields from the Sentence object will be written
:param sep: Separator used in the file. Default is '\t`
:param append: Whether to use append mode or write a new file
:param sub_sep: If a field contains a list of items in JSON, this seperator will be used
to separate values in the list
:return:
"""
mode = 'a' if append else 'w'
if sep == "\t":
with open(filename, mode) as f:
if schema is not None and not append:
f.write(sep.join(schema) + "\n")
for record in records:
f.write(sep.join([TsvIO.make_str(record.__getitem__(field), sub_sep=sub_sep) for
field in schema]))
f.write('\n')
f.close()
elif sep == ",":
with open(filename, mode) as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=schema)
writer.writeheader()
for record in records:
writer.writerow(record)
csvfile.close()
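# Round-trip sketch appended for illustration (not part of the original module):
# write two records with TsvIO.write and stream them back with TsvIO.read. The
# temporary path is created on the fly, so nothing about the repo layout is assumed.
if __name__ == "__main__":
    import tempfile
    demo_records = [{"id": "1", "text": "hello"}, {"id": "2", "text": "world"}]
    with tempfile.NamedTemporaryFile(mode="w", suffix=".tsv", delete=False) as tmp:
        demo_path = tmp.name
    TsvIO.write(demo_records, filename=demo_path, schema=["id", "text"])
    for row in TsvIO.read(demo_path):
        # Each yielded dict also carries 'source' and 'line_num' bookkeeping fields.
        print(row["id"], row["text"], row["line_num"])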
| abductive-commonsense-reasoning-master | utils/file_utils.py |
| cartography-main | cartography/__init__.py |
"""
Utilities for data handling.
"""
import json
import logging
import os
import pandas as pd
import shutil
from typing import Dict
from cartography.data_utils_glue import read_glue_tsv
logger = logging.getLogger(__name__)
def read_data(file_path: str,
task_name: str,
guid_as_int: bool = False):
"""
Reads task-specific datasets from corresponding GLUE-style TSV files.
"""
logger.warning("Data reading only works when data is in TSV format, "
" and last column as classification label.")
# `guid_index`: should be 2 for SNLI, 0 for MNLI and None for any random tsv file.
if task_name == "MNLI":
return read_glue_tsv(file_path,
guid_index=0,
guid_as_int=guid_as_int)
elif task_name == "SNLI":
return read_glue_tsv(file_path,
guid_index=2,
guid_as_int=guid_as_int)
elif task_name == "WINOGRANDE":
return read_glue_tsv(file_path,
guid_index=0)
elif task_name == "QNLI":
return read_glue_tsv(file_path,
guid_index=0)
else:
raise NotImplementedError(f"Reader for {task_name} not implemented.")
def convert_tsv_entries_to_dataframe(tsv_dict: Dict, header: str) -> pd.DataFrame:
"""
Converts entries from TSV file to Pandas DataFrame for faster processing.
"""
header_fields = header.strip().split("\t")
data = {header: [] for header in header_fields}
for line in tsv_dict.values():
fields = line.strip().split("\t")
assert len(header_fields) == len(fields)
for field, header in zip(fields, header_fields):
data[header].append(field)
df = pd.DataFrame(data, columns=header_fields)
return df
def copy_dev_test(task_name: str,
from_dir: os.path,
to_dir: os.path,
extension: str = ".tsv"):
"""
Copies development and test sets (for data selection experiments) from `from_dir` to `to_dir`.
"""
if task_name == "MNLI":
dev_filename = "dev_matched.tsv"
test_filename = "dev_mismatched.tsv"
elif task_name in ["SNLI", "QNLI", "WINOGRANDE"]:
dev_filename = f"dev{extension}"
test_filename = f"test{extension}"
else:
raise NotImplementedError(f"Logic for {task_name} not implemented.")
dev_path = os.path.join(from_dir, dev_filename)
if os.path.exists(dev_path):
shutil.copyfile(dev_path, os.path.join(to_dir, dev_filename))
else:
raise ValueError(f"No file found at {dev_path}")
test_path = os.path.join(from_dir, test_filename)
if os.path.exists(test_path):
shutil.copyfile(test_path, os.path.join(to_dir, test_filename))
else:
raise ValueError(f"No file found at {test_path}")
def read_jsonl(file_path: str, key: str = "pairID"):
"""
    Reads a JSONL file and returns a mapping from the given key field in each line
    to that line, parsed as a JSON dict.
    If no key is provided, returns a list of JSON dicts.
"""
df = pd.read_json(file_path, lines=True)
records = df.to_dict('records')
logger.info(f"Read {len(records)} JSON records from {file_path}.")
if key:
assert key in df.columns
return {record[key]: record for record in records}
return records
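# Minimal sketch added for illustration (not part of the original module): build a
# DataFrame from a tiny in-memory tsv_dict, mirroring the (line-dict, header) pair
# that read_glue_tsv produces. The example rows are made up.
if __name__ == "__main__":
    demo_header = "guid\tsentence\tlabel"
    demo_tsv_dict = {"ex1": "ex1\tA dog runs.\tentailment",
                     "ex2": "ex2\tA cat sleeps.\tneutral"}
    print(convert_tsv_entries_to_dataframe(demo_tsv_dict, demo_header))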
| cartography-main | cartography/data_utils.py |
import logging
import random
import re
import tqdm
logger = logging.getLogger(__name__)
def convert_string_to_unique_number(string: str) -> int:
"""
Hack to convert SNLI ID into a unique integer ID, for tensorizing.
"""
id_map = {'e': '0', 'c': '1', 'n': '2'}
# SNLI-specific hacks.
if string.startswith('vg_len'):
code = '555'
elif string.startswith('vg_verb'):
code = '444'
else:
code = '000'
try:
number = int(code + re.sub(r"\D", "", string) + id_map.get(string[-1], '3'))
except:
number = random.randint(10000, 99999)
logger.info(f"Cannot find ID for {string}, using random number {number}.")
return number
def read_glue_tsv(file_path: str,
guid_index: int,
label_index: int = -1,
guid_as_int: bool = False):
"""
Reads TSV files for GLUE-style text classification tasks.
Returns:
- a mapping between the example ID and the entire line as a string.
- the header of the TSV file.
"""
tsv_dict = {}
i = -1
with open(file_path, 'r') as tsv_file:
for line in tqdm.tqdm([line for line in tsv_file]):
i += 1
if i == 0:
header = line.strip()
field_names = line.strip().split("\t")
continue
fields = line.strip().split("\t")
label = fields[label_index]
if len(fields) > len(field_names):
# SNLI / MNLI fields sometimes contain multiple annotator labels.
# Ignore all except the gold label.
reformatted_fields = fields[:len(field_names)-1] + [label]
assert len(reformatted_fields) == len(field_names)
reformatted_line = "\t".join(reformatted_fields)
else:
reformatted_line = line.strip()
if label == "-" or label == "":
logger.info(f"Skippping line: {line}")
continue
if guid_index is None:
guid = i
else:
guid = fields[guid_index] # PairID.
if guid in tsv_dict:
logger.info(f"Found clash in IDs ... skipping example {guid}.")
continue
tsv_dict[guid] = reformatted_line.strip()
logger.info(f"Read {len(tsv_dict)} valid examples, with unique IDS, out of {i} from {file_path}")
if guid_as_int:
tsv_numeric = {int(convert_string_to_unique_number(k)): v for k, v in tsv_dict.items()}
return tsv_numeric, header
return tsv_dict, header
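# Sanity-check sketch added for illustration (not in the original module): the IDs
# below are made up, but show the prefix codes ('000' vs. '555') and the trailing
# label digit produced by convert_string_to_unique_number.
if __name__ == "__main__":
    for demo_id in ["4705552913.jpg#2r1e", "4705552913.jpg#2r1c", "vg_len_12345n"]:
        print(demo_id, "->", convert_string_to_unique_number(demo_id))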
| cartography-main | cartography/data_utils_glue.py |
from transformers.data.processors.glue import QnliProcessor
class AdaptedQnliProcessor(QnliProcessor):
def get_examples(self, data_file, set_type):
return self._create_examples(self._read_tsv(data_file), set_type=set_type)
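# Note added for clarity (not in the original file): unlike the stock QnliProcessor,
# get_examples takes an explicit TSV path, which is what lets the run_glue-style
# --train/--dev/--test overrides point at arbitrary (e.g. filtered or re-sampled) QNLI splits.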
| cartography-main | cartography/classification/qnli_utils.py |
import json
import os
class Params:
"""
All configuration required for running glue using transformers.
"""
def __init__(self, MODEL_CLASSES, ALL_MODELS, processors, configs):
### Required parameters
# Directory where task data resides.
self.data_dir : str = configs["data_dir"]
# Input data
self.train : str = configs.get("train", None)
self.dev : str = configs.get("dev", None)
self.test: str = configs.get("test", None)
# One of 'bert', 'roberta', etc.
self.model_type : str = configs["model_type"]
assert self.model_type in MODEL_CLASSES.keys()
# Path to pre-trained model or shortcut name from `ALL_MODELS`.
self.model_name_or_path : str = configs["model_name_or_path"]
assert self.model_name_or_path in ALL_MODELS
# The name of the task to train.
self.task_name : str = configs["task_name"]
assert self.task_name.lower() in processors.keys()
# Random seed for initialization.
self.seed : int = configs["seed"]
# The output directory where the model predictions and checkpoints will be written.
self.output_dir : str = configs["output_dir"]
# Whether to run training.
self.do_train : bool = configs.get("do_train", False)
# Whether to run eval on the dev set.
self.do_eval : bool = configs.get("do_eval", False)
# Whether to run eval on the dev set.
self.do_test : bool = configs.get("do_test", False)
### Other parameters
# Pretrained config name or path if not the same as `model_name`.
self.config_name : str = configs.get("config_name", "")
# Pretrained tokenizer name or path if not the same as `model_name`.
self.tokenizer_name : str = configs.get("tokenizer_name", "")
# Where to store the pre-trained models downloaded from s3:// location.
self.cache_dir : str = configs.get("cache_dir", "")
# Where to store the feature cache for the model.
self.features_cache_dir : str = configs.get("features_cache_dir",
os.path.join(self.data_dir, f"cache_{self.seed}"))
# The maximum total input sequence length after tokenization.
# Sequences longer than this will be truncated,
# sequences shorter will be padded.
self.max_seq_length : int = configs.get("max_seq_length", 128)
# Run evaluation during training after each epoch.
self.evaluate_during_training : bool = configs.get("evaluate_during_training", True)
# Run evaluation during training at each logging step.
self.evaluate_during_training_epoch : bool = configs.get("evaluate_during_training_epoch",
False)
# Set this flag if you are using an uncased model.
self.do_lower_case : bool = configs.get("do_lower_case", True)
# Batch size per GPU/CPU for training.
self.per_gpu_train_batch_size : int = configs.get("per_gpu_train_batch_size", 96)
# Batch size per GPU/CPU for evaluation.
self.per_gpu_eval_batch_size : int = configs.get("per_gpu_eval_batch_size", 96)
# Number of updates steps to accumulate before
# performing a backward/update pass.
self.gradient_accumulation_steps : int = configs.get("gradient_accumulation_steps", 1)
# The initial learning rate for Adam.
self.learning_rate : float = configs.get("learning_rate", 1e-5)
# Weight decay if we apply some.
self.weight_decay : float = configs.get("weight_decay", 0.0)
# Epsilon for Adam optimizer.
self.adam_epsilon : float = configs.get("adam_epsilon", 1e-8)
# Max gradient norm.
self.max_grad_norm : float = configs.get("max_grad_norm", 1.0)
# Total number of training epochs to perform.
self.num_train_epochs : float = configs.get("num_train_epochs", 3.0)
# If > 0 : set total number of training steps to perform.
# Override num_train_epochs.
self.max_steps : int = configs.get("max_steps", -1)
# Linear warmup over warmup_steps.
self.warmup_steps : int = configs.get("warmup_steps", 0)
# Log every X updates steps.
self.logging_steps : int = configs.get("logging_steps", 1000)
# If dev performance does not improve in X updates, end training.
self.patience : int = configs.get("patience", 3)
# Save checkpoint every X updates steps.
self.save_steps:int = configs.get("save_steps", 0)
        # Evaluate all checkpoints starting with the same prefix as
        # model_name and ending with the step number.
self.eval_all_checkpoints : bool = configs.get("eval_all_checkpoints", False)
# Avoid using CUDA when available
self.no_cuda : bool = configs.get("no_cuda", False)
# Overwrite the content of the output directory
self.overwrite_output_dir : bool = configs.get("overwrite_output_dir", False)
# Overwrite the cached training and evaluation sets
self.overwrite_cache : bool = configs.get("overwrite_cache", False)
# Whether to use 16-bit (mixed) precision (through NVIDIA apex)
# instead of 32-bit
self.fp16 : bool = configs.get("fp16", False)
        # For fp16: Apex AMP optimization level selected from
        # ['O0', 'O1', 'O2', 'O3'].
        # See details at https://nvidia.github.io/apex/amp.html
        self.fp16_opt_level : str = configs.get("fp16_opt_level", "O1")
# For distributed training.
self.local_rank : int = configs.get("local_rank", -1)
# For distant debugging.
self.server_ip : str = configs.get("server_ip", "")
self.server_port : str = configs.get("server_port", "")
def save_args_to_file(params: Params, mode: str = ""):
"""
Saves the configs in `Params` to a json file, during train or eval mode.
"""
with open(os.path.join(params.output_dir, f"cartography_config_{mode}.json"), "w") as outfile:
writable_params = vars(params)
# torch.device needs to be cast into a string to be json compatible.
writable_params["device"] = str(params.device)
outfile.write(json.dumps(writable_params, indent=4, sort_keys=True) + "\n")
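# Hypothetical construction sketch (not part of the original file): Params only checks
# membership in the registries it is handed, so stub dicts are enough to show which
# config keys are required and which defaults kick in. All paths below are placeholders.
if __name__ == "__main__":
    stub_model_classes = {"roberta": None}
    stub_all_models = ["roberta-base"]
    stub_processors = {"snli": None}
    demo_configs = {"data_dir": "/tmp/data",
                    "model_type": "roberta",
                    "model_name_or_path": "roberta-base",
                    "task_name": "SNLI",
                    "seed": 42,
                    "output_dir": "/tmp/output"}
    params = Params(stub_model_classes, stub_all_models, stub_processors, demo_configs)
    print(params.task_name, params.learning_rate, params.max_seq_length, params.features_cache_dir)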
| cartography-main | cartography/classification/params.py |
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import (BertForMultipleChoice,
BertForSequenceClassification,
RobertaForMultipleChoice,
RobertaForSequenceClassification)
class AdaptedRobertaForSequenceClassification(RobertaForSequenceClassification):
def __init__(self, config):
super().__init__(config)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import RobertaTokenizer, RobertaForSequenceClassification
import torch
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaForSequenceClassification.from_pretrained('roberta-base')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs # Modified from original `Transformers` since we need sequence output to summarize.
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, sequence_output, pooled_sequence_output, (hidden_states), (attentions)
class AdaptedRobertaForMultipleChoice(RobertaForMultipleChoice):
def __init__(self, config):
super().__init__(config)
def forward(
self,
input_ids=None,
token_type_ids=None,
attention_mask=None,
labels=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
            loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
`num_choices` is the second dimension of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import RobertaTokenizer, RobertaForMultipleChoice
import torch
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaForMultipleChoice.from_pretrained('roberta-base')
choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
labels = torch.tensor(1).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, classification_scores = outputs[:2]
"""
num_choices = input_ids.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
outputs = (reshaped_logits,) + outputs # Modified from original `Transformers` since we need logits.
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
class AdaptedBertForMultipleChoice(BertForMultipleChoice):
def __init__(self, config):
super().__init__(config)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
`num_choices` is the second dimension of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForMultipleChoice
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
labels = torch.tensor(1).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, classification_scores = outputs[:2]
"""
num_choices = input_ids.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1))
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
outputs = (reshaped_logits,) + outputs # Modified from original `Transformers` since we need logits.
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
class AdaptedBertForSequenceClassification(BertForSequenceClassification):
def __init__(self, config):
super().__init__(config)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForSequenceClassification
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs #[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
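# Shape-check sketch added for illustration (not from the repo): a small randomly
# initialised model built straight from a config (no pretrained download) confirms
# that the adapted forward() also surfaces the encoder's sequence output.
if __name__ == "__main__":
    import torch
    from transformers import RobertaConfig
    demo_config = RobertaConfig(num_hidden_layers=2, num_attention_heads=2,
                                hidden_size=64, intermediate_size=128, num_labels=3)
    demo_model = AdaptedRobertaForSequenceClassification(demo_config)
    demo_input_ids = torch.randint(5, demo_config.vocab_size, (2, 8))
    demo_labels = torch.tensor([0, 2])
    loss, logits, sequence_output = demo_model(input_ids=demo_input_ids, labels=demo_labels)[:3]
    print(loss.item(), tuple(logits.shape), tuple(sequence_output.shape))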
| cartography-main | cartography/classification/models.py |
import argparse
import logging
import numpy as np
import os
from collections import defaultdict
from sklearn.metrics import matthews_corrcoef
from cartography.data_utils_glue import read_glue_tsv, convert_string_to_unique_number
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
# Use the same fields as GLUE SNLI test + additional field for Diagnostic NLI.
FIELDS = ["index", "captionID", "pairID", "sentence1_binary_parse", "sentence2_binary_parse",
"sentence1_parse", "sentence2_parse", "sentence1", "sentence2", "category", "gold_label",
]
LOGIC = ["Negation", "Double negation", "Intervals/Numbers", "Conjunction", "Disjunction",
"Conditionals", "Universal", "Existential", "Temporal", "Upward monotone",
"Downward monotone", "Non-monotone",
]
LEXSEM = ["Lexical entailment", "Morphological negation", "Factivity", "Symmetry/Collectivity",
"Redundancy", "Named entities", "Quantifiers",
]
PAS = ["Core args", "Prepositional phrases", "Ellipsis/Implicits", "Anaphora/Coreference",
"Active/Passive", "Nominalization", "Genitives/Partitives", "Datives", "Relative clauses",
"Coordination scope", "Intersectivity", "Restrictivity",
]
KNOWLEDGE = ["Common sense", "World knowledge",
]
# Based on paper: https://openreview.net/pdf?id=rJ4km2R5t7
category_names = {"logic": 364,
"predicate_argument_structure": 424,
"lexical_semantics": 368,
"knowledge": 284}
coarse_to_fine = {"logic": LOGIC,
"predicate_argument_structure": PAS,
"lexical_semantics": LEXSEM,
"knowledge": KNOWLEDGE}
fine_to_coarse = {}
for coarse_cat, category in coarse_to_fine.items():
for fine_cat in category:
assert fine_cat not in fine_to_coarse
fine_to_coarse[fine_cat] = coarse_cat
def label_balance(label_list):
distribution = defaultdict(int)
for label in label_list:
distribution[label] += 1
for label in distribution:
distribution[label] /= len(label_list)
return np.std(list(distribution.values()))
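# Worked example for the helper above (values illustrative): a perfectly balanced
# list such as ["e", "c", "n", "e", "c", "n"] gives label_balance(...) == 0.0, while
# a skewed list like ["e", "e", "e", "c"] yields std([0.75, 0.25]) = 0.25.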
def determine_categories_by_fields(fields):
example_categories = []
for field in fields[:-4]:
if field == '':
continue
elif ";" in field:
example_categories.append(fine_to_coarse[field.split(";")[0]]) # Usually same coarse category.
else:
example_categories.append(fine_to_coarse[field])
return example_categories
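# For instance (illustrative), a diagnostic row whose leading category columns contain
# "Negation" and "World knowledge" maps to ["logic", "knowledge"]; only the fields
# before the last four are inspected for category names.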
def diag_test_modifier(original_diag_tsv, output_tsv):
"""Modify the TSV file provided for Diagnostic NLI tests to follow the same
format as the other test files for GLUE NLI."""
diag_original, diag_headers = read_glue_tsv(original_diag_tsv, guid_index=None)
coarse_category_counter = {name: 0 for name in category_names}
with open(output_tsv, "w") as outfile:
outfile.write("\t".join(FIELDS) + "\n")
lines_with_missing_fields = 0
multiple_categories = 0
written = 0
for i, (key, line) in enumerate(diag_original.items()):
in_fields = line.strip().split("\t")
if len(in_fields) < 8:
# logger.info(f"Line with missing fields: {len(in_fields)} out of 8.\n {in_fields}")
lines_with_missing_fields += 1
example_categories = determine_categories_by_fields(fields=in_fields)
for ec in example_categories:
coarse_category_counter[ec] += 1
if len(example_categories) > 1:
# logger.info(f"{len(category)} Categories : {category} \n {in_fields[:-4]}")
multiple_categories += 1
elif not len(example_categories):
logger.info(f"No category found:\n {line}")
# HACK: from my understanding, this is an example of factivity.
example_categories = ["lexical_semantics"]
guid = str(i)
out_record = {"index": guid,
"captionID": guid,
"pairID": guid,
"sentence1_binary_parse": "",
"sentence2_binary_parse": "",
"sentence1_parse": "",
"sentence2_parse": "",
"gold_label": in_fields[-1],
"sentence2": in_fields[-2],
"sentence1": in_fields[-3],
"category": ";".join(example_categories)}
out_fields = [out_record[field] if field in out_record else "" for field in FIELDS]
outfile.write("\t".join(out_fields) + "\n")
written += 1
for c, count in coarse_category_counter.items():
logger.info(f"Items in {c}: {count}")
assert category_names[c] == count
logger.info(f"Total records: {len(diag_original)}")
logger.info(f"Records with missing fields: {lines_with_missing_fields}.")
logger.info(f"Records with 2+ categories: {multiple_categories}.")
logger.info(f"Total records written: {written} to {output_tsv}")
def evaluate_by_category(predictions,
eval_filename,
mnli_hack = False,
diagnostics_file_carto = None,
diagnostics_file_original="/home/swabhas/diagnostics_nli/diagnostic-full.tsv"):
    if diagnostics_file_carto and not os.path.exists(diagnostics_file_carto):
diag_test_modifier(diagnostics_file_original, diagnostics_file_carto)
diagnostics_orig, diag_headers = read_glue_tsv(diagnostics_file_carto)
diagnostics = {convert_string_to_unique_number(key): val for key, val in diagnostics_orig.items()}
# Category-wise counts.
coarse_gold_labels = {key: [] for key in coarse_to_fine}
coarse_predicted_labels = {key: [] for key in coarse_to_fine}
# Some examples span multiple categories; maintain a global count to avoid overcounting.
predicted_labels = []
gold_labels = []
if mnli_hack: # Workaround for HuggingFace Transformers hack.
logger.warning("WARNING: EMPLOYING HACK! "
"In HuggingFace Transformers, MNLI labels are swapped in the RoBERTa model."
"See: https://github.com/huggingface/transformers/blob/v2.8.0/examples/run_glue.py#L350"
"Hence, for evaluation, these need to be swapped back.")
with open(eval_filename, "w") as outfile:
for p in predictions:
guid = p["guid"]
if guid not in diagnostics:
raise ValueError(f"Could not find predicted GUID: {p['guid']} in Diagnostic NLI test")
gold_record = diagnostics[guid]
gold_fields = gold_record.strip().split("\t")
assert len(FIELDS) == len(gold_fields)
gold_labels.append(gold_fields[-1])
if mnli_hack:
if p["label"] == "contradiction":
p["label"] = "entailment"
elif p["label"] == "entailment":
p["label"] = "contradiction"
predicted_labels.append(p["label"])
diagnostic_categories = gold_fields[-2].split(";")
for c in diagnostic_categories:
coarse_gold_labels[c].append(gold_fields[-1])
coarse_predicted_labels[c].append(p["label"])
logged_results = []
for cat, total in coarse_gold_labels.items():
cat_r3 = matthews_corrcoef(y_true=coarse_gold_labels[cat], y_pred=coarse_predicted_labels[cat])
cat_results = (np.array(coarse_gold_labels[cat]) == np.array(coarse_predicted_labels[cat]))
cat_acc = cat_results.mean()
logged_results.append(f"{cat:30}: {cat_acc:.4f} r3: {cat_r3:.4f}"
f" {cat_results.sum()}/{len(coarse_gold_labels[cat]):4}"
f" class-var: {label_balance(coarse_gold_labels[cat]):.3f}")
overall_results = np.array(gold_labels) == np.array(predicted_labels)
overall_accuracy = overall_results.mean()
overall_r3 = matthews_corrcoef(y_true=gold_labels, y_pred=predicted_labels)
logged_results.append(f"{'total acc':30}: {overall_accuracy:.4f} r3: {overall_r3:.4f}"
f" {overall_results.sum()}/{len(gold_labels):4}"
f" class-var: {label_balance(gold_labels):.3f}")
for lr in logged_results:
logger.info(lr)
outfile.write(lr+"\n")
logger.info(f"Results written to {eval_filename}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--diagnostics_input",
"-i",
type=os.path.abspath,
default="/home/swabhas/diagnostic_nli/diagnostic-full.tsv")
parser.add_argument("--output",
"-o",
type=os.path.abspath,
default="/home/swabhas/data/glue/SNLI/diagnostics_test_bugfree.tsv")
args = parser.parse_args()
logger.info(args)
diag_test_modifier(args.diagnostics_input, args.output)
| cartography-main | cartography/classification/diagnostics_evaluation.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Finetuning the library models for sequence classification on GLUE-style tasks
(BERT, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa); modified for Dataset Cartography.
"""
import _jsonnet
import argparse
import glob
import json
import logging
import numpy as np
import os
import random
import shutil
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertTokenizer,
RobertaConfig,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from cartography.classification.glue_utils import adapted_glue_compute_metrics as compute_metrics
from cartography.classification.glue_utils import adapted_glue_convert_examples_to_features as convert_examples_to_features
from cartography.classification.glue_utils import glue_output_modes as output_modes
from cartography.classification.glue_utils import glue_processors as processors
from cartography.classification.diagnostics_evaluation import evaluate_by_category
from cartography.classification.models import (
AdaptedBertForMultipleChoice,
AdaptedBertForSequenceClassification,
AdaptedRobertaForMultipleChoice,
AdaptedRobertaForSequenceClassification
)
from cartography.classification.multiple_choice_utils import convert_mc_examples_to_features
from cartography.classification.params import Params, save_args_to_file
from cartography.selection.selection_utils import log_training_dynamics
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (
BertConfig,
RobertaConfig,
)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, AdaptedBertForSequenceClassification, BertTokenizer),
"bert_mc": (BertConfig, AdaptedBertForMultipleChoice, BertTokenizer),
"roberta": (RobertaConfig, AdaptedRobertaForSequenceClassification, RobertaTokenizer),
"roberta_mc": (RobertaConfig, AdaptedRobertaForMultipleChoice, RobertaTokenizer),
}
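# The plain "bert"/"roberta" entries route to the adapted sequence-classification heads
# (GLUE-style tasks such as SNLI, MNLI, QNLI), while the "_mc" variants select the
# multiple-choice heads used for WINOGRANDE; the keys are what a config's "model_type"
# field must match.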
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(
train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (
len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0
},
]
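    # Standard Transformers fine-tuning recipe: biases and LayerNorm weights are excluded
    # from weight decay, while every other parameter gets args.weight_decay.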
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_this_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
        # set global_step to global_step of last saved checkpoint from model path
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_this_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(f" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {global_step}")
logger.info(f" Will skip the first {steps_trained_in_this_epoch} steps in the first epoch")
tr_loss, logging_loss, epoch_loss = 0.0, 0.0, 0.0
model.zero_grad()
train_iterator = trange(epochs_trained,
int(args.num_train_epochs),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
    set_seed(args)  # Added here for reproducibility
best_dev_performance = 0
best_epoch = epochs_trained
train_acc = 0.0
for epoch, _ in enumerate(train_iterator):
epoch_iterator = tqdm(train_dataloader,
desc="Iteration",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
train_iterator.set_description(f"train_epoch: {epoch} train_acc: {train_acc:.4f}")
train_ids = None
train_golds = None
train_logits = None
train_losses = None
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_this_epoch > 0:
steps_trained_in_this_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if train_logits is None: # Keep track of training dynamics.
train_ids = batch[4].detach().cpu().numpy()
train_logits = outputs[1].detach().cpu().numpy()
train_golds = inputs["labels"].detach().cpu().numpy()
train_losses = loss.detach().cpu().numpy()
else:
train_ids = np.append(train_ids, batch[4].detach().cpu().numpy())
train_logits = np.append(train_logits, outputs[1].detach().cpu().numpy(), axis=0)
train_golds = np.append(train_golds, inputs["labels"].detach().cpu().numpy())
train_losses = np.append(train_losses, loss.detach().cpu().numpy())
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if (
args.local_rank in [-1, 0] and
args.logging_steps > 0 and
global_step % args.logging_steps == 0
):
epoch_log = {}
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank == -1 and args.evaluate_during_training_epoch:
logger.info(f"From within the epoch at step {step}")
results, _ = evaluate(args, model, tokenizer)
for key, value in results.items():
eval_key = "eval_{}".format(key)
epoch_log[eval_key] = value
epoch_log["learning_rate"] = scheduler.get_lr()[0]
epoch_log["loss"] = (tr_loss - logging_loss) / args.logging_steps
logging_loss = tr_loss
for key, value in epoch_log.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**epoch_log, **{"step": global_step}}))
if (
args.local_rank in [-1, 0] and
args.save_steps > 0 and
global_step % args.save_steps == 0
):
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
epoch_iterator.set_description(f"lr = {scheduler.get_lr()[0]:.8f}, "
f"loss = {(tr_loss-epoch_loss)/(step+1):.4f}")
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
#### Post epoch eval ####
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank == -1 and args.evaluate_during_training:
best_dev_performance, best_epoch = save_model(
args, model, tokenizer, epoch, best_epoch, best_dev_performance)
# Keep track of training dynamics.
log_training_dynamics(output_dir=args.output_dir,
epoch=epoch,
train_ids=list(train_ids),
train_logits=list(train_logits),
train_golds=list(train_golds))
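            # These per-epoch IDs, logits and gold labels are the raw training dynamics that
            # the cartography selection code later summarises into confidence / variability
            # statistics; per-example losses are tracked above but not logged here.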
train_result = compute_metrics(args.task_name, np.argmax(train_logits, axis=1), train_golds)
train_acc = train_result["acc"]
epoch_log = {"epoch": epoch,
"train_acc": train_acc,
"best_dev_performance": best_dev_performance,
"avg_batch_loss": (tr_loss - epoch_loss) / args.per_gpu_train_batch_size,
"learning_rate": scheduler.get_lr()[0],}
epoch_loss = tr_loss
logger.info(f" End of epoch : {epoch}")
with open(os.path.join(args.output_dir, f"eval_metrics_train.json"), "a") as toutfile:
toutfile.write(json.dumps(epoch_log) + "\n")
for key, value in epoch_log.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(f" {key}: {value:.6f}")
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
elif args.evaluate_during_training and epoch - best_epoch >= args.patience:
logger.info(f"Ran out of patience. Best epoch was {best_epoch}. "
f"Stopping training at epoch {epoch} out of {args.num_train_epochs} epochs.")
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def save_model(args, model, tokenizer, epoch, best_epoch, best_dev_performance):
results, _ = evaluate(args, model, tokenizer, prefix="in_training")
# TODO(SS): change hard coding `acc` as the desired metric, might not work for all tasks.
desired_metric = "acc"
dev_performance = results.get(desired_metric)
if dev_performance > best_dev_performance:
best_epoch = epoch
best_dev_performance = dev_performance
# Save model checkpoint
# Take care of distributed/parallel training
model_to_save = (model.module if hasattr(model, "module") else model)
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
logger.info(f"*** Found BEST model, and saved checkpoint. "
f"BEST dev performance : {dev_performance:.4f} ***")
return best_dev_performance, best_epoch
def evaluate(args, model, tokenizer, prefix="", eval_split="dev"):
# We do not really need a loop to handle MNLI double evaluation (matched, mis-matched).
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
results = {}
all_predictions = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(
args, eval_task, tokenizer, evaluate=True, data_split=f"{eval_split}_{prefix}")
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info(f"***** Running {eval_task} {prefix} evaluation on {eval_split} *****")
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.eval_batch_size}")
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
example_ids = []
gold_labels = []
for batch in tqdm(eval_dataloader, desc="Evaluating", mininterval=10, ncols=100):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
example_ids += batch[4].tolist()
gold_labels += batch[3].tolist()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
probs = torch.nn.functional.softmax(torch.Tensor(preds), dim=-1)
max_confidences = (torch.max(probs, dim=-1)[0]).tolist()
preds = np.argmax(preds, axis=1) # Max of logit is the same as max of probability.
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
output_eval_file = os.path.join(
eval_output_dir, f"eval_metrics_{eval_task}_{eval_split}_{prefix}.json")
logger.info(f"***** {eval_task} {eval_split} results {prefix} *****")
for key in sorted(result.keys()):
logger.info(f"{eval_task} {eval_split} {prefix} {key} = {result[key]:.4f}")
with open(output_eval_file, "a") as writer:
writer.write(json.dumps(results) + "\n")
# predictions
all_predictions[eval_task] = []
output_pred_file = os.path.join(
eval_output_dir, f"predictions_{eval_task}_{eval_split}_{prefix}.lst")
with open(output_pred_file, "w") as writer:
logger.info(f"***** Write {eval_task} {eval_split} predictions {prefix} *****")
for ex_id, pred, gold, max_conf, prob in zip(
example_ids, preds, gold_labels, max_confidences, probs.tolist()):
record = {"guid": ex_id,
"label": processors[args.task_name]().get_labels()[pred],
"gold": processors[args.task_name]().get_labels()[gold],
"confidence": max_conf,
"probabilities": prob}
all_predictions[eval_task].append(record)
writer.write(json.dumps(record) + "\n")
return results, all_predictions
def load_dataset(args, task, eval_split="train"):
processor = processors[task]()
if eval_split == "train":
if args.train is None:
examples = processor.get_train_examples(args.data_dir)
else:
examples = processor.get_examples(args.train, "train")
elif "dev" in eval_split:
if args.dev is None:
examples = processor.get_dev_examples(args.data_dir)
else:
examples = processor.get_examples(args.dev, "dev")
elif "test" in eval_split:
if args.test is None:
examples = processor.get_test_examples(args.data_dir)
else:
examples = processor.get_examples(args.test, "test")
else:
raise ValueError(f"eval_split should be train / dev / test, but was given {eval_split}")
return examples
def get_winogrande_tensors(features):
def select_field(features, field):
return [[choice[field] for choice in feature.choices_features] for feature in features]
# Convert to Tensors and build dataset
input_ids = torch.tensor(select_field(features, "input_ids"), dtype=torch.long)
input_mask = torch.tensor(select_field(features, "input_mask"), dtype=torch.long)
segment_ids = torch.tensor(select_field(features, "segment_ids"), dtype=torch.long)
label_ids = torch.tensor([f.label for f in features], dtype=torch.long)
example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)
dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids, example_ids)
return dataset
def load_and_cache_examples(args, task, tokenizer, evaluate=False, data_split="train"):
if args.local_rank not in [-1, 0] and not evaluate:
# Make sure only the first process in distributed training process the dataset,
# and the others will use the cache
torch.distributed.barrier()
processor = processors[task]()
output_mode = output_modes[task]
if not os.path.exists(args.features_cache_dir):
os.makedirs(args.features_cache_dir)
cached_features_file = os.path.join(
args.features_cache_dir,
"cached_{}_{}_{}_{}".format(
data_split,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
str(task),
),
)
# Load data features from cache or dataset file
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
examples = load_dataset(args, task, data_split)
if task == "winogrande":
features = convert_mc_examples_to_features(
examples,
label_list,
args.max_seq_length,
tokenizer,
pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
pad_token=tokenizer.pad_token_id,
pad_token_segment_id=tokenizer.pad_token_type_id,)
else:
features = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
# Make sure only the first process in distributed training
# process the dataset, and the others will use the cache
torch.distributed.barrier()
if task == "winogrande":
return get_winogrande_tensors(features)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_example_ids = torch.tensor([f.example_id for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_example_ids)
return dataset
def run_transformer(args):
if (os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir):
raise ValueError(
f"Output directory ({args.output_dir}) already exists and is not empty."
f" Use --overwrite_output_dir to overcome.")
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see
# https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
logger.info("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,)
if args.local_rank == 0:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
args.learning_rate = float(args.learning_rate)
if args.do_train:
# If training for the first time, remove cache. If training from a checkpoint, keep cache.
if os.path.exists(args.features_cache_dir) and not args.overwrite_output_dir:
logger.info(f"Found existing cache for the same seed {args.seed}: "
f"{args.features_cache_dir}...Deleting!")
shutil.rmtree(args.features_cache_dir)
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
save_args_to_file(args, mode="train")
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(f" global_step = {global_step}, average loss = {tr_loss:.4f}")
# Saving best-practices: if you use defaults names for the model,
# you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
if not args.evaluate_during_training:
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# Take care of distributed/parallel training
model_to_save = (model.module if hasattr(model, "module") else model)
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
logger.info(" **** Done with training ****")
# Evaluation
eval_splits = []
if args.do_eval:
eval_splits.append("dev")
if args.do_test:
eval_splits.append("test")
    if (args.do_test or args.do_eval) and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(
glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
results = {}
prefix = args.test.split("/")[-1].split(".tsv")[0] if args.test else ""
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix += checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
for eval_split in eval_splits:
save_args_to_file(args, mode=eval_split)
result, predictions = evaluate(args, model, tokenizer, prefix=prefix, eval_split=eval_split)
result = dict((k + f"_{global_step}", v) for k, v in result.items())
results.update(result)
if args.test and "diagnostic" in args.test:
# For running diagnostics with MNLI, run as SNLI and use hack.
evaluate_by_category(predictions[args.task_name],
mnli_hack=True if args.task_name in ["SNLI", "snli"] and "mnli" in args.output_dir else False,
eval_filename=os.path.join(args.output_dir, f"eval_metrics_diagnostics.json"),
diagnostics_file_carto=args.test)
logger.info(" **** Done ****")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--config",
"-c",
type=os.path.abspath,
required=True,
help="Main config file with basic arguments.")
parser.add_argument("--output_dir",
"-o",
type=os.path.abspath,
required=True,
help="Output directory for model.")
parser.add_argument("--do_train",
action="store_true",
help="Whether to run training.")
parser.add_argument("--do_eval",
action="store_true",
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test",
action="store_true",
help="Whether to run eval on the (OOD) test set.")
parser.add_argument("--test",
type=os.path.abspath,
help="OOD test set.")
# TODO(SS): Automatically map tasks to OOD test sets.
args_from_cli = parser.parse_args()
other_args = json.loads(_jsonnet.evaluate_file(args_from_cli.config))
other_args.update(**vars(args_from_cli))
args = Params(MODEL_CLASSES, ALL_MODELS, processors, other_args)
run_transformer(args)
if __name__ == "__main__":
main()
|
cartography-main
|
cartography/classification/run_glue.py
|
from transformers.data.processors.glue import MnliProcessor, MnliMismatchedProcessor
class AdaptedMnliProcessor(MnliProcessor):
def get_examples(self, data_file, set_type):
return self._create_examples(self._read_tsv(data_file), set_type=set_type)
class AdaptedMnliMismatchedProcessor(MnliMismatchedProcessor):
def get_examples(self, data_file, set_type):
return self._create_examples(self._read_tsv(data_file), set_type=set_type)
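# Minimal usage sketch: the adapted processors expose `get_examples(data_file, set_type)`,
# so any GLUE-formatted MNLI TSV can be read directly from a path instead of a fixed
# data_dir split. The path below is a placeholder, not a file shipped with the repo.
def _example_load_mnli_split(tsv_path="data/glue/MNLI/dev_matched.tsv"):
    processor = AdaptedMnliProcessor()
    return processor.get_examples(tsv_path, set_type="dev")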
|
cartography-main
|
cartography/classification/mnli_utils.py
|
cartography-main
|
cartography/classification/__init__.py
|
|
import logging
import os
from transformers import glue_compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes
from transformers import glue_processors
from transformers.data.processors.glue import MnliMismatchedProcessor
from transformers.data.processors.utils import InputFeatures
from transformers.file_utils import is_tf_available
if is_tf_available():
import tensorflow as tf
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
from cartography.data_utils_glue import convert_string_to_unique_number
from cartography.classification.mnli_utils import AdaptedMnliMismatchedProcessor, AdaptedMnliProcessor
from cartography.classification.qnli_utils import AdaptedQnliProcessor
from cartography.classification.snli_utils import SNLIProcessor
from cartography.classification.winogrande_utils import WinograndeProcessor
glue_processors["snli"] = SNLIProcessor
glue_processors["mnli"] = AdaptedMnliProcessor
glue_processors["mnli-mm"] = AdaptedMnliMismatchedProcessor
glue_processors["qnli"] = AdaptedQnliProcessor
glue_processors["winogrande"] = WinograndeProcessor
glue_output_modes["snli"] = "classification"
glue_output_modes["winogrande"] = "classification"
class AdaptedInputFeatures(InputFeatures):
def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None, example_id=None):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label = label
self.example_id = example_id
def adapted_glue_convert_examples_to_features(
examples,
tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True,
):
"""
Adapted from `transformers`. New functionality: also return an integer ID for each example.
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
task: GLUE task
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
output_mode: String indicating the output mode. Either ``regression`` or ``classification``
pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
actual values)
Returns:
If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
containing the task-specific features. If the input is a list of ``InputExamples``, will return
a list of task-specific ``InputFeatures`` which can be fed to the model.
"""
is_tf_dataset = False
if is_tf_available() and isinstance(examples, tf.data.Dataset):
is_tf_dataset = True
if task is not None:
processor = glue_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = glue_output_modes[task]
logger.info("Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
len_examples = 0
if is_tf_dataset:
example = processor.get_example_from_tensor_dict(example)
example = processor.tfds_map(example)
len_examples = tf.data.experimental.cardinality(examples)
else:
len_examples = len(examples)
if ex_index % 10000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len_examples))
inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
len(attention_mask), max_length
)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
len(token_type_ids), max_length
)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
else:
raise KeyError(output_mode)
example_int_id = convert_string_to_unique_number(example.guid)
if ex_index < 5:
logger.info("*** Example ***")
logger.info(f"guid: {example_int_id}")
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
AdaptedInputFeatures(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label,
example_id=example_int_id))
if is_tf_available() and is_tf_dataset:
def gen():
for ex in features:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
return tf.data.Dataset.from_generator(
gen,
({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
(
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
tf.TensorShape([]),
),
)
return features
def adapted_glue_compute_metrics(task_name, preds, labels):
"Adapted from `glue_compute_metrics` to also handle SNLI."
try:
return glue_compute_metrics(task_name, preds, labels)
except KeyError:
if task_name in ["snli", "winogrande", "toxic"]:
# Since MNLI also uses accuracy.
return glue_compute_metrics("mnli", preds, labels)
raise KeyError(task_name)
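# Minimal sketch of the fallback path above (toy values): "snli" is not in the stock GLUE
# metric table, so the KeyError branch reuses MNLI's accuracy computation.
def _example_snli_metric_fallback():
    import numpy as np
    preds = np.array([0, 1, 2, 2])
    labels = np.array([0, 1, 2, 1])
    # Returns an accuracy dict computed via the MNLI branch (3 of 4 correct).
    return adapted_glue_compute_metrics("snli", preds, labels)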
|
cartography-main
|
cartography/classification/glue_utils.py
|
"""
Utils for Multiple Choice Classification (for WinoGrande).
"""
import logging
import tqdm
from typing import List
from transformers import PreTrainedTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
class MCInputExample(object):
"""A single training/test example for multiple choice"""
def __init__(self, example_id, question, contexts, endings, label=None):
"""Constructs an MCInputExample.
Args:
example_id: Unique id for the example.
contexts: list of str. The untokenized text of the first sequence (context of corresponding question).
question: string. The untokenized text of the second sequence (question).
            endings: list of str. The multiple-choice options; must have the same length as contexts.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.example_id = example_id
self.question = question
self.contexts = contexts
self.endings = endings
self.label = label
class MCInputFeatures(object):
def __init__(self, example_id, choices_features, label):
self.example_id = example_id
self.choices_features = [
{"input_ids": input_ids, "input_mask": input_mask, "segment_ids": segment_ids}
for input_ids, input_mask, segment_ids in choices_features
]
self.label = label
def convert_mc_examples_to_features(
examples: List[MCInputExample],
label_list: List[str],
max_length: int,
tokenizer: PreTrainedTokenizer,
pad_token_segment_id=0,
pad_on_left=False,
pad_token=0,
mask_padding_with_zero=True,
) -> List[MCInputFeatures]:
"""
Loads a data file into a list of `MCInputFeatures`
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="converting MC examples to features"):
if ex_index % 10000 == 0:
logger.info(" Writing example %d of %d" % (ex_index, len(examples)))
choices_features = []
for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)):
text_a = context
if example.question.find("_") != -1:
# this is for cloze question
text_b = example.question.replace("_", ending)
else:
text_b = example.question + " " + ending
inputs = tokenizer.encode_plus(
text_a, text_b, add_special_tokens=True, max_length=max_length, return_token_type_ids=True
)
if "num_truncated_tokens" in inputs and inputs["num_truncated_tokens"] > 0:
logger.info(
"Attention! you are cropping tokens (swag task is ok). "
"If you are training ARC and RACE and you are poping question + options,"
"you need to try to use a bigger max seq length!"
)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length
assert len(attention_mask) == max_length
assert len(token_type_ids) == max_length
choices_features.append((input_ids, attention_mask, token_type_ids))
label = label_map[example.label]
if ex_index < 2:
logger.info("*** Example ***")
logger.info("winogrande_id: {}".format(example.example_id))
logger.info("winogrande_context: {}".format(example.contexts[0]))
for choice_idx, (input_ids, attention_mask, token_type_ids) in enumerate(choices_features):
logger.info(f"choice {choice_idx}: {example.endings[choice_idx]}")
logger.info("input_ids: {}".format(" ".join(map(str, input_ids))))
logger.info("attention_mask: {}".format(" ".join(map(str, attention_mask))))
logger.info("token_type_ids: {}".format(" ".join(map(str, token_type_ids))))
logger.info(f"label: {label == choice_idx}")
features.append(MCInputFeatures(example_id=example.example_id,
choices_features=choices_features,
label=label,))
return features
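# Minimal usage sketch for the multiple-choice pipeline (WinoGrande-style): one cloze example
# with two endings is converted into per-choice features. Any tokenizer implementing
# `encode_plus` works; the example text and values are illustrative assumptions.
def _example_convert_single_mc_example(tokenizer: PreTrainedTokenizer) -> List[MCInputFeatures]:
    example = MCInputExample(
        example_id=0,
        question="_",
        contexts=["Ann asked Mary what time the library closes, because ",
                  "Ann asked Mary what time the library closes, because "],
        endings=["Ann had forgotten.", "Mary had forgotten."],
        label="1",
    )
    return convert_mc_examples_to_features(
        [example], label_list=["1", "2"], max_length=64, tokenizer=tokenizer)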
|
cartography-main
|
cartography/classification/multiple_choice_utils.py
|
"""
Scripts to handle the Winogrande task.
"""
import json
import logging
import os
import tqdm
from transformers.data.processors.utils import DataProcessor
from cartography.classification.multiple_choice_utils import MCInputExample
from cartography.data_utils import read_data
class WinograndeProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
read_data(os.path.join(data_dir, "train.tsv"), task_name="WINOGRANDE"))
def get_dev_examples(self, data_dir):
return self._create_examples(
read_data(os.path.join(data_dir, "dev.tsv"), task_name="WINOGRANDE"))
def get_test_examples(self, data_dir):
return self._create_examples(
read_data(os.path.join(data_dir, "test.tsv"), task_name="WINOGRANDE"))
def get_examples(self, data_file, set_type):
return self._create_examples(read_data(data_file, task_name="WINOGRANDE"))
def get_labels(self):
"""See base class."""
return ["1", "2"]
def _build_example_from_named_fields(self, guid, sentence, name1, name2, label):
conj = "_"
idx = sentence.index(conj)
context = sentence[:idx]
option_str = "_ " + sentence[idx + len(conj):].strip()
option1 = option_str.replace("_", name1)
option2 = option_str.replace("_", name2)
mc_example = MCInputExample(
example_id=int(guid),
contexts=[context, context],
question=conj,
            endings=[option1, option2],
label=label
)
return mc_example
def _create_examples(self, records):
tsv_dict, header = records
examples = []
for idx, line in tsv_dict.items():
fields = line.strip().split("\t")
assert idx == fields[0]
sentence = fields[2]
name1 = fields[3]
name2 = fields[4]
if len(fields) > 5:
label = fields[-1]
else:
label = "1" # Dummy label for test prediction.
mc_example = self._build_example_from_named_fields(idx, sentence, name1, name2, label)
examples.append(mc_example)
return examples
def _create_examples_jsonl(self, records):
examples = []
for (i, record) in enumerate(records):
sentence = record['sentence']
name1 = record['option1']
name2 = record['option2']
            if 'answer' not in record:
# This is a dummy label for test prediction.
# test.jsonl doesn't include the `answer`.
label = "1"
else:
label = record['answer']
mc_example = self._build_example_from_named_fields(i, sentence, name1, name2, label)
examples.append(mc_example)
return examples
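# Worked example (illustrative values) of how a WinoGrande record is split: the sentence is
# cut at the "_" placeholder, and each option replaces it in the full continuation.
#   sentence = "The trophy does not fit in the suitcase because _ is too big."
#   context  -> "The trophy does not fit in the suitcase because "
#   option1  -> "trophy is too big."     (name1 = "trophy")
#   option2  -> "suitcase is too big."   (name2 = "suitcase")
def _example_build_winogrande_example() -> MCInputExample:
    return WinograndeProcessor()._build_example_from_named_fields(
        guid="1",
        sentence="The trophy does not fit in the suitcase because _ is too big.",
        name1="trophy",
        name2="suitcase",
        label="1",
    )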
|
cartography-main
|
cartography/classification/winogrande_utils.py
|
import os
from transformers.data.processors.utils import DataProcessor, InputExample
class SNLIProcessor(DataProcessor):
"""Processor for the SNLI data set (GLUE version)."""
def get_labels(self):
return ["entailment", "neutral", "contradiction"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = line[2] #"%s-%s" % (set_type, line[0])
text_a = line[7]
text_b = line[8]
label = line[-1]
if label == "-" or label == "":
continue
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_examples(self, data_file, set_type):
return self._create_examples(self._read_tsv(data_file), set_type=set_type)
def get_train_examples(self, data_dir):
"""See base class."""
return self.get_examples(os.path.join(data_dir, "train.tsv"), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self.get_examples(os.path.join(data_dir, "dev.tsv"), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self.get_examples(os.path.join(data_dir, "test.tsv"), "test")
|
cartography-main
|
cartography/classification/snli_utils.py
|
import json
import logging
import numpy as np
import os
import pandas as pd
import tqdm
from typing import List
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
def log_training_dynamics(output_dir: os.path,
epoch: int,
train_ids: List[int],
train_logits: List[List[float]],
train_golds: List[int]):
"""
Save training dynamics (logits) from given epoch as records of a `.jsonl` file.
"""
td_df = pd.DataFrame({"guid": train_ids,
f"logits_epoch_{epoch}": train_logits,
"gold": train_golds})
logging_dir = os.path.join(output_dir, f"training_dynamics")
# Create directory for logging training dynamics, if it doesn't already exist.
if not os.path.exists(logging_dir):
os.makedirs(logging_dir)
epoch_file_name = os.path.join(logging_dir, f"dynamics_epoch_{epoch}.jsonl")
td_df.to_json(epoch_file_name, lines=True, orient="records")
logger.info(f"Training Dynamics logged to {epoch_file_name}")
def read_training_dynamics(model_dir: os.path,
strip_last: bool = False,
id_field: str = "guid",
burn_out: int = None):
"""
Given path to logged training dynamics, merge stats across epochs.
Returns:
- Dict between ID of a train instances and its gold label, and the list of logits across epochs.
"""
train_dynamics = {}
td_dir = os.path.join(model_dir, "training_dynamics")
num_epochs = len([f for f in os.listdir(td_dir) if os.path.isfile(os.path.join(td_dir, f))])
if burn_out:
num_epochs = burn_out
logger.info(f"Reading {num_epochs} files from {td_dir} ...")
for epoch_num in tqdm.tqdm(range(num_epochs)):
epoch_file = os.path.join(td_dir, f"dynamics_epoch_{epoch_num}.jsonl")
assert os.path.exists(epoch_file)
with open(epoch_file, "r") as infile:
for line in infile:
record = json.loads(line.strip())
guid = record[id_field] if not strip_last else record[id_field][:-1]
if guid not in train_dynamics:
assert epoch_num == 0
train_dynamics[guid] = {"gold": record["gold"], "logits": []}
train_dynamics[guid]["logits"].append(record[f"logits_epoch_{epoch_num}"])
logger.info(f"Read training dynamics for {len(train_dynamics)} train instances.")
return train_dynamics
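# Minimal round-trip sketch (toy values): log per-epoch logits for two instances and merge
# them back. The output directory is a placeholder path, not one used elsewhere in the repo.
def _example_training_dynamics_roundtrip(output_dir="/tmp/td_example"):
    for epoch in range(2):
        log_training_dynamics(output_dir,
                              epoch=epoch,
                              train_ids=[101, 102],
                              train_logits=[[0.2, 1.3, -0.5], [2.0, -1.0, 0.1]],
                              train_golds=[1, 0])
    # Yields {guid: {"gold": ..., "logits": [logits_epoch_0, logits_epoch_1]}}.
    return read_training_dynamics(output_dir)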
|
cartography-main
|
cartography/selection/selection_utils.py
|
"""
Randomly sample dataset examples for a data selection baseline.
"""
import argparse
import logging
import os
import pandas as pd
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
from cartography.data_utils import read_data, convert_tsv_entries_to_dataframe, copy_dev_test
if __name__ == "__main__":
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir",
"-i",
required=True,
type=os.path.abspath,
help="Path containing the TSV train file from which to subsample.",)
parser.add_argument("--output_dir",
"-o",
required=True,
type=os.path.abspath,
help="Path where randomly subsampled data is to be written.",)
parser.add_argument("--task_name",
"-t",
default="SNLI",
choices=("SNLI", "MNLI", "WINOGRANDE", "QNLI"),
help="Name of GLUE-style task.",)
parser.add_argument("--seed",
type=int,
default=725862,
help="Random seed for sampling.")
parser.add_argument("--fraction",
"-f",
type=float,
help="Number between 0 and 1, indicating fraction of random samples to select.")
args = parser.parse_args()
if args.fraction and 0 < args.fraction < 1:
fractions = [args.fraction]
else:
fractions = [0.01, 0.05, 0.10, 0.1667, 0.25, 0.33, 0.50, 0.75]
# Read the input train file.
input_train_file = os.path.join(args.input_dir, "train.tsv")
try:
train = pd.read_csv(input_train_file, sep="\t")
except pd.errors.ParserError:
logger.info(f"Could not parse {input_train_file}. "
"Will read it as TSV and then convert into a Pandas dataframe.")
train_dict, train_header = read_data(input_train_file, task_name=args.task_name)
train = convert_tsv_entries_to_dataframe(train_dict, header=train_header)
logger.info(f"Read {len(train)} examples from {input_train_file}. "
f"Creating {fractions} subsamples...")
outdir_base = f"{args.output_dir}_{args.seed}"
for fraction in fractions:
outdir = os.path.join(outdir_base, f"{args.task_name}_{fraction:.2f}/{args.task_name}")
if not os.path.exists(outdir):
os.makedirs(outdir)
out_file_name = os.path.join(outdir, "train.tsv")
# Dev and test need not be subsampled.
copy_dev_test(args.task_name, from_dir=args.input_dir, to_dir=outdir)
# Train set needs to be subsampled.
train_sample = train.sample(n=int(fraction * len(train)),
random_state=args.seed) # Set seed for replication.
train_sample.to_csv(out_file_name, sep="\t", index=False)
logger.info(f"Wrote {len(train_sample)} examples to {out_file_name}")
|
cartography-main
|
cartography/selection/random_filtering.py
|
"""
Filtering and dataset mapping methods based on training dynamics.
By default, this module reads training dynamics from a given trained model and
computes the metrics---confidence, variability, correctness,
as well as baseline metrics of forgetfulness and threshold closeness
for each instance in the training data.
If specified, data maps can be plotted with respect to confidence and variability.
Moreover, datasets can be filtered with respect to any of the other metrics.
"""
import argparse
import json
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import torch
import tqdm
from collections import defaultdict
from typing import List
from cartography.data_utils import read_data, read_jsonl, copy_dev_test
from cartography.selection.selection_utils import read_training_dynamics
# TODO(SS): Named tuple for tasks and filtering methods.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
def compute_forgetfulness(correctness_trend: List[float]) -> int:
"""
    Given an epoch-wise trend of train predictions, compute the frequency with which
    an example is forgotten, i.e. predicted incorrectly _after_ being predicted correctly.
Based on: https://arxiv.org/abs/1812.05159
"""
if not any(correctness_trend): # Example is never predicted correctly, or learnt!
return 1000
learnt = False # Predicted correctly in the current epoch.
times_forgotten = 0
for is_correct in correctness_trend:
if (not learnt and not is_correct) or (learnt and is_correct):
# nothing changed.
continue
elif learnt and not is_correct:
# Forgot after learning at some point!
learnt = False
times_forgotten += 1
elif not learnt and is_correct:
# Learnt!
learnt = True
return times_forgotten
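# Worked example (toy trend, illustrative only): the instance below is learnt at epoch 1,
# forgotten at epoch 3, and re-learnt at epoch 4, so it counts as forgotten once; an
# all-zero trend (never learnt) gets the sentinel value 1000.
#   compute_forgetfulness([0, 1, 1, 0, 1])  -> 1
#   compute_forgetfulness([0, 0, 0])        -> 1000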
def compute_correctness(trend: List[float]) -> float:
"""
Aggregate #times an example is predicted correctly during all training epochs.
"""
return sum(trend)
def compute_train_dy_metrics(training_dynamics, args):
"""
    Given the training dynamics (logits for each training instance across epochs), compute metrics
    based on them, for the data map coordinates.
Computed metrics are: confidence, variability, correctness, forgetfulness, threshold_closeness---
the last two being baselines from prior work
(Example Forgetting: https://arxiv.org/abs/1812.05159 and
Active Bias: https://arxiv.org/abs/1704.07433 respectively).
Returns:
- DataFrame with these metrics.
- DataFrame with more typical training evaluation metrics, such as accuracy / loss.
"""
confidence_ = {}
variability_ = {}
threshold_closeness_ = {}
correctness_ = {}
forgetfulness_ = {}
# Functions to be applied to the data.
variability_func = lambda conf: np.std(conf)
if args.include_ci: # Based on prior work on active bias (https://arxiv.org/abs/1704.07433)
variability_func = lambda conf: np.sqrt(np.var(conf) + np.var(conf) * np.var(conf) / (len(conf)-1))
threshold_closeness_func = lambda conf: conf * (1 - conf)
loss = torch.nn.CrossEntropyLoss()
num_tot_epochs = len(list(training_dynamics.values())[0]["logits"])
if args.burn_out < num_tot_epochs:
logger.info(f"Computing training dynamics. Burning out at {args.burn_out} of {num_tot_epochs}. ")
else:
logger.info(f"Computing training dynamics across {num_tot_epochs} epochs")
logger.info("Metrics computed: confidence, variability, correctness, forgetfulness, threshold_closeness")
logits = {i: [] for i in range(num_tot_epochs)}
targets = {i: [] for i in range(num_tot_epochs)}
training_accuracy = defaultdict(float)
for guid in tqdm.tqdm(training_dynamics):
correctness_trend = []
true_probs_trend = []
record = training_dynamics[guid]
for i, epoch_logits in enumerate(record["logits"]):
probs = torch.nn.functional.softmax(torch.Tensor(epoch_logits), dim=-1)
true_class_prob = float(probs[record["gold"]])
true_probs_trend.append(true_class_prob)
prediction = np.argmax(epoch_logits)
is_correct = (prediction == record["gold"]).item()
correctness_trend.append(is_correct)
training_accuracy[i] += is_correct
logits[i].append(epoch_logits)
targets[i].append(record["gold"])
if args.burn_out < num_tot_epochs:
correctness_trend = correctness_trend[:args.burn_out]
true_probs_trend = true_probs_trend[:args.burn_out]
correctness_[guid] = compute_correctness(correctness_trend)
confidence_[guid] = np.mean(true_probs_trend)
variability_[guid] = variability_func(true_probs_trend)
forgetfulness_[guid] = compute_forgetfulness(correctness_trend)
threshold_closeness_[guid] = threshold_closeness_func(confidence_[guid])
# Should not affect ranking, so ignoring.
epsilon_var = np.mean(list(variability_.values()))
column_names = ['guid',
'index',
'threshold_closeness',
'confidence',
'variability',
'correctness',
'forgetfulness',]
df = pd.DataFrame([[guid,
i,
threshold_closeness_[guid],
confidence_[guid],
variability_[guid],
correctness_[guid],
forgetfulness_[guid],
] for i, guid in enumerate(correctness_)], columns=column_names)
df_train = pd.DataFrame([[i,
loss(torch.Tensor(logits[i]), torch.LongTensor(targets[i])).item() / len(training_dynamics),
training_accuracy[i] / len(training_dynamics)
] for i in range(num_tot_epochs)],
columns=['epoch', 'loss', 'train_acc'])
return df, df_train
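# Worked example (toy values) of the data-map coordinates for one instance whose gold-class
# probabilities across three epochs are [0.9, 0.8, 0.95]:
#   confidence  = mean([0.9, 0.8, 0.95]) ~= 0.883
#   variability = std([0.9, 0.8, 0.95])  ~= 0.062   (population std, the default here)
# High confidence with low variability places the instance in the easy-to-learn region.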
def consider_ascending_order(filtering_metric: str) -> bool:
"""
    Determine the sort order of the metric values (ascending or not) that surfaces the most `valuable` examples for training.
"""
if filtering_metric == "variability":
return False
elif filtering_metric == "confidence":
return True
elif filtering_metric == "threshold_closeness":
return False
elif filtering_metric == "forgetfulness":
return False
elif filtering_metric == "correctness":
return True
else:
raise NotImplementedError(f"Filtering based on {filtering_metric} not implemented!")
def write_filtered_data(args, train_dy_metrics):
"""
    Filter data based on the given metric, and write it in TSV format to train a GLUE-style classifier.
"""
# First save the args for filtering, to keep track of which model was used for filtering.
argparse_dict = vars(args)
with open(os.path.join(args.filtering_output_dir, f"filtering_configs.json"), "w") as outfile:
outfile.write(json.dumps(argparse_dict, indent=4, sort_keys=True) + "\n")
# Determine whether to sort data in ascending order or not, based on the metric.
is_ascending = consider_ascending_order(args.metric)
if args.worst:
is_ascending = not is_ascending
# Sort by selection.
sorted_scores = train_dy_metrics.sort_values(by=[args.metric],
ascending=is_ascending)
original_train_file = os.path.join(os.path.join(args.data_dir, args.task_name), f"train.tsv")
train_numeric, header = read_data(original_train_file, task_name=args.task_name, guid_as_int=True)
for fraction in [0.01, 0.05, 0.10, 0.1667, 0.25, 0.3319, 0.50, 0.75]:
outdir = os.path.join(args.filtering_output_dir,
f"cartography_{args.metric}_{fraction:.2f}/{args.task_name}")
if not os.path.exists(outdir):
os.makedirs(outdir)
# Dev and test need not be subsampled.
copy_dev_test(args.task_name,
from_dir=os.path.join(args.data_dir, args.task_name),
to_dir=outdir)
num_samples = int(fraction * len(train_numeric))
with open(os.path.join(outdir, f"train.tsv"), "w") as outfile:
outfile.write(header + "\n")
selected = sorted_scores.head(n=num_samples+1)
if args.both_ends:
hardest = sorted_scores.head(n=int(num_samples * 0.7))
easiest = sorted_scores.tail(n=num_samples - hardest.shape[0])
selected = pd.concat([hardest, easiest])
fm = args.metric
logger.info(f"Selecting both ends: {fm} = "
f"({hardest.head(1)[fm].values[0]:3f}: {hardest.tail(1)[fm].values[0]:3f}) "
f"& ({easiest.head(1)[fm].values[0]:3f}: {easiest.tail(1)[fm].values[0]:3f})")
selection_iterator = tqdm.tqdm(range(len(selected)))
for idx in selection_iterator:
selection_iterator.set_description(
f"{args.metric} = {selected.iloc[idx][args.metric]:.4f}")
selected_id = selected.iloc[idx]["guid"]
if args.task_name in ["SNLI", "MNLI"]:
selected_id = int(selected_id)
elif args.task_name == "WINOGRANDE":
selected_id = str(int(selected_id))
record = train_numeric[selected_id]
outfile.write(record + "\n")
logger.info(f"Wrote {num_samples} samples to {outdir}.")
def plot_data_map(dataframe: pd.DataFrame,
plot_dir: os.path,
hue_metric: str = 'correct.',
title: str = '',
model: str = 'RoBERTa',
show_hist: bool = False,
                  max_instances_to_plot=55000):
# Set style.
sns.set(style='whitegrid', font_scale=1.6, font='Georgia', context='paper')
logger.info(f"Plotting figure for {title} using the {model} model ...")
# Subsample data to plot, so the plot is not too busy.
dataframe = dataframe.sample(n=max_instances_to_plot if dataframe.shape[0] > max_instances_to_plot else len(dataframe))
# Normalize correctness to a value between 0 and 1.
dataframe = dataframe.assign(corr_frac = lambda d: d.correctness / d.correctness.max())
dataframe['correct.'] = [f"{x:.1f}" for x in dataframe['corr_frac']]
main_metric = 'variability'
other_metric = 'confidence'
hue = hue_metric
num_hues = len(dataframe[hue].unique().tolist())
style = hue_metric if num_hues < 8 else None
if not show_hist:
fig, ax0 = plt.subplots(1, 1, figsize=(8, 6))
else:
fig = plt.figure(figsize=(14, 10), )
gs = fig.add_gridspec(3, 2, width_ratios=[5, 1])
ax0 = fig.add_subplot(gs[:, 0])
# Make the scatterplot.
# Choose a palette.
pal = sns.diverging_palette(260, 15, n=num_hues, sep=10, center="dark")
plot = sns.scatterplot(x=main_metric,
y=other_metric,
ax=ax0,
data=dataframe,
hue=hue,
palette=pal,
style=style,
s=30)
# Annotate Regions.
bb = lambda c: dict(boxstyle="round,pad=0.3", ec=c, lw=2, fc="white")
func_annotate = lambda text, xyc, bbc : ax0.annotate(text,
xy=xyc,
xycoords="axes fraction",
fontsize=15,
color='black',
va="center",
ha="center",
rotation=350,
bbox=bb(bbc))
an1 = func_annotate("ambiguous", xyc=(0.9, 0.5), bbc='black')
an2 = func_annotate("easy-to-learn", xyc=(0.27, 0.85), bbc='r')
an3 = func_annotate("hard-to-learn", xyc=(0.35, 0.25), bbc='b')
if not show_hist:
plot.legend(ncol=1, bbox_to_anchor=[0.175, 0.5], loc='right')
else:
plot.legend(fancybox=True, shadow=True, ncol=1)
plot.set_xlabel('variability')
plot.set_ylabel('confidence')
if show_hist:
plot.set_title(f"{title}-{model} Data Map", fontsize=17)
# Make the histograms.
ax1 = fig.add_subplot(gs[0, 1])
ax2 = fig.add_subplot(gs[1, 1])
ax3 = fig.add_subplot(gs[2, 1])
plott0 = dataframe.hist(column=['confidence'], ax=ax1, color='#622a87')
plott0[0].set_title('')
plott0[0].set_xlabel('confidence')
plott0[0].set_ylabel('density')
plott1 = dataframe.hist(column=['variability'], ax=ax2, color='teal')
plott1[0].set_title('')
plott1[0].set_xlabel('variability')
plott1[0].set_ylabel('density')
plot2 = sns.countplot(x="correct.", data=dataframe, ax=ax3, color='#86bf91')
ax3.xaxis.grid(True) # Show the vertical gridlines
plot2.set_title('')
plot2.set_xlabel('correctness')
plot2.set_ylabel('density')
fig.tight_layout()
filename = f'{plot_dir}/{title}_{model}.pdf' if show_hist else f'figures/compact_{title}_{model}.pdf'
fig.savefig(filename, dpi=300)
logger.info(f"Plot saved to {filename}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--filter",
action="store_true",
help="Whether to filter data subsets based on specified `metric`.")
parser.add_argument("--plot",
action="store_true",
help="Whether to plot data maps and save as `pdf`.")
parser.add_argument("--model_dir",
"-o",
required=True,
type=os.path.abspath,
help="Directory where model training dynamics stats reside.")
parser.add_argument("--data_dir",
"-d",
default="/Users/swabhas/data/glue/WINOGRANDE/xl/",
type=os.path.abspath,
help="Directory where data for task resides.")
parser.add_argument("--plots_dir",
default="./cartography/",
type=os.path.abspath,
help="Directory where plots are to be saved.")
parser.add_argument("--task_name",
"-t",
default="WINOGRANDE",
choices=("SNLI", "MNLI", "QNLI", "WINOGRANDE"),
help="Which task are we plotting or filtering for.")
parser.add_argument('--metric',
choices=('threshold_closeness',
'confidence',
'variability',
'correctness',
'forgetfulness'),
help="Metric to filter data by.",)
parser.add_argument("--include_ci",
action="store_true",
help="Compute the confidence interval for variability.")
parser.add_argument("--filtering_output_dir",
"-f",
default="./filtered/",
type=os.path.abspath,
help="Output directory where filtered datasets are to be written.")
parser.add_argument("--worst",
action="store_true",
help="Select from the opposite end of the spectrum acc. to metric,"
"for baselines")
parser.add_argument("--both_ends",
action="store_true",
help="Select from both ends of the spectrum acc. to metric,")
parser.add_argument("--burn_out",
type=int,
default=100,
help="# Epochs for which to compute train dynamics.")
parser.add_argument("--model",
default="RoBERTa",
help="Model for which data map is being plotted")
args = parser.parse_args()
training_dynamics = read_training_dynamics(args.model_dir,
strip_last=True if args.task_name in ["QNLI"] else False,
burn_out=args.burn_out if args.burn_out < 100 else None)
total_epochs = len(list(training_dynamics.values())[0]["logits"])
if args.burn_out > total_epochs:
args.burn_out = total_epochs
logger.info(f"Total epochs found: {args.burn_out}")
train_dy_metrics, _ = compute_train_dy_metrics(training_dynamics, args)
burn_out_str = f"_{args.burn_out}" if args.burn_out > total_epochs else ""
train_dy_filename = os.path.join(args.model_dir, f"td_metrics{burn_out_str}.jsonl")
train_dy_metrics.to_json(train_dy_filename,
orient='records',
lines=True)
logger.info(f"Metrics based on Training Dynamics written to {train_dy_filename}")
if args.filter:
assert args.filtering_output_dir
if not os.path.exists(args.filtering_output_dir):
os.makedirs(args.filtering_output_dir)
assert args.metric
write_filtered_data(args, train_dy_metrics)
if args.plot:
assert args.plots_dir
if not os.path.exists(args.plots_dir):
os.makedirs(args.plots_dir)
plot_data_map(train_dy_metrics, args.plots_dir, title=args.task_name, show_hist=True, model=args.model)
|
cartography-main
|
cartography/selection/train_dy_filtering.py
|
cartography-main
|
cartography/selection/__init__.py
|
|
from invoke import task
@task
def set_version(c, version):
"""Writes the version upon a release."""
for filename in ["setup.py", "ai2thor_colab/__init__.py"]:
with open(filename, "r") as f:
file = f.read()
file = file.replace("<REPLACE_WITH_VERSION>", version)
with open(filename, "w") as f:
f.write(file)
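# Usage sketch (run from the repo root; the version string is an example):
#   invoke set-version --version 1.2.3
# Invoke exposes the task as `set-version` by default and the task substitutes the given
# version for the <REPLACE_WITH_VERSION> placeholder in both files listed above.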
|
ai2thor-colab-main
|
tasks.py
|
from setuptools import setup
version = "<REPLACE_WITH_VERSION>"
with open("README.md", encoding="utf-8") as f:
long_description = f.read()
setup(
name="ai2thor_colab",
packages=["ai2thor_colab"],
version=version,
license="Apache 2.0",
description="Utility functions for using AI2-THOR with Google Colab.",
long_description=long_description,
long_description_content_type="text/markdown",
author_email="[email protected]",
author="Allen Institute for AI",
install_requires=["numpy", "moviepy>=1.0.3", "pandas", "ai2thor", "Pillow"],
url="https://github.com/allenai/ai2thor-colab",
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: Apache Software License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
|
ai2thor-colab-main
|
setup.py
|
from IPython.display import HTML, display
import sys
from moviepy.editor import ImageSequenceClip
from typing import Sequence
import numpy as np
import os
from typing import Optional
import ai2thor.server
from typing import Union
from PIL import Image
import matplotlib.pyplot as plt
__version__ = "<REPLACE_WITH_VERSION>"
__all__ = ["plot_frames", "show_video", "start_xserver", "overlay", "side_by_side"]
def show_objects_table(objects: list) -> None:
"""Visualizes objects in a way that they are clickable and filterable.
Example:
event = controller.step("MoveAhead")
objects = event.metadata["objects"]
show_objects_table(objects)
"""
import pandas as pd
from collections import OrderedDict
from google.colab.data_table import DataTable
processed_objects = []
for obj in objects:
obj = obj.copy()
obj["position[x]"] = round(obj["position"]["x"], 4)
obj["position[y]"] = round(obj["position"]["y"], 4)
obj["position[z]"] = round(obj["position"]["z"], 4)
obj["rotation[x]"] = round(obj["rotation"]["x"], 4)
obj["rotation[y]"] = round(obj["rotation"]["y"], 4)
obj["rotation[z]"] = round(obj["rotation"]["z"], 4)
del obj["position"]
del obj["rotation"]
# these are too long to display
del obj["objectOrientedBoundingBox"]
del obj["axisAlignedBoundingBox"]
del obj["receptacleObjectIds"]
obj["mass"] = round(obj["mass"], 4)
obj["distance"] = round(obj["distance"], 4)
obj = OrderedDict(obj)
obj.move_to_end("distance", last=False)
obj.move_to_end("rotation[z]", last=False)
obj.move_to_end("rotation[y]", last=False)
obj.move_to_end("rotation[x]", last=False)
obj.move_to_end("position[z]", last=False)
obj.move_to_end("position[y]", last=False)
obj.move_to_end("position[x]", last=False)
obj.move_to_end("name", last=False)
obj.move_to_end("objectId", last=False)
obj.move_to_end("objectType", last=False)
processed_objects.append(obj)
df = pd.DataFrame(processed_objects)
print(
"Object Metadata. Not showing objectOrientedBoundingBox, axisAlignedBoundingBox, and receptacleObjectIds for clarity."
)
return DataTable(df, max_columns=150, num_rows_per_page=150)
def overlay(
frame1: np.ndarray,
frame2: np.ndarray,
title: Optional[str] = None,
frame2_alpha: float = 0.75,
) -> None:
"""Blend image frame1 and frame2 on top of each other.
Example:
event1 = controller.last_event
event2 = controller.step("RotateRight")
overlay(event1.frame, event2.frame)
"""
fig, ax = plt.subplots(nrows=1, ncols=1, dpi=150, figsize=(4, 5))
if not (0 < frame2_alpha < 1):
        raise ValueError(f"frame2_alpha must be in (0, 1), not {frame2_alpha}")
if frame1.dtype == np.uint8:
frame1 = frame1 / 255
if frame2.dtype == np.uint8:
frame2 = frame2 / 255
ax.imshow(frame2_alpha * frame2 + (1 - frame2_alpha) * frame1)
ax.axis("off")
if title:
fig.suptitle(title, y=0.87, x=0.5125)
def side_by_side(
frame1: np.ndarray, frame2: np.ndarray, title: Optional[str] = None
) -> None:
"""Plot 2 image frames next to each other.
Example:
event1 = controller.last_event
event2 = controller.step("RotateRight")
        side_by_side(event1.frame, event2.frame)
"""
fig, axs = plt.subplots(nrows=1, ncols=2, dpi=150, figsize=(8, 5))
axs[0].imshow(frame1)
axs[0].axis("off")
axs[1].imshow(frame2)
axs[1].axis("off")
if title:
fig.suptitle(title, y=0.85, x=0.5125)
def plot_frames(event: Union[ai2thor.server.Event, np.ndarray]) -> None:
"""Visualize all the frames on an AI2-THOR Event.
Example:
plot_frames(controller.last_event)
"""
if isinstance(event, ai2thor.server.Event):
frames = dict()
third_person_frames = event.third_party_camera_frames
if event.frame is not None:
frames["RGB"] = event.frame
if event.instance_segmentation_frame is not None:
frames["Instance Segmentation"] = event.instance_segmentation_frame
if event.semantic_segmentation_frame is not None:
frames["Semantic Segmentation"] = event.semantic_segmentation_frame
if event.normals_frame is not None:
frames["Normals"] = event.normals_frame
if event.depth_frame is not None:
frames["Depth"] = event.depth_frame
if len(frames) == 0:
raise Exception("No agent frames rendered on this event!")
rows = 2 if len(third_person_frames) else 1
cols = max(len(frames), len(third_person_frames))
fig, axs = plt.subplots(
nrows=rows, ncols=cols, dpi=150, figsize=(3 * cols, 3 * rows)
)
agent_row = axs[0] if rows > 1 else axs
for i, (name, frame) in enumerate(frames.items()):
ax = agent_row[i] if cols > 1 else agent_row
im = ax.imshow(frame)
ax.axis("off")
ax.set_title(name)
if name == "Depth":
fig.colorbar(im, fraction=0.046, pad=0.04, ax=ax)
# set unused axes off
for i in range(len(frames), cols):
agent_row[i].axis("off")
# add third party camera frames
if rows > 1:
for i, frame in enumerate(third_person_frames):
ax = axs[1][i] if cols > 1 else axs[1]
ax.imshow(frame)
ax.axis("off")
for i in range(len(third_person_frames), cols):
axs[1][i].axis("off")
fig.text(x=0.1, y=0.715, s="Agent Frames", rotation="vertical", va="center")
fig.text(
x=0.1,
y=0.3025,
s="Third Person Frames",
rotation="vertical",
va="center",
)
elif isinstance(event, np.ndarray):
return Image.fromarray(event)
else:
raise Exception(
f"Unknown type: {type(event)}. "
"Must be np.ndarray or ai2thor.server.Event."
)
def show_video(frames: Sequence[np.ndarray], fps: int = 10):
"""Show a video composed of a sequence of frames.
Example:
frames = [
controller.step("RotateRight", degrees=5).frame
for _ in range(72)
]
show_video(frames, fps=5)
"""
frames = ImageSequenceClip(frames, fps=fps)
return frames.ipython_display()
def start_xserver() -> None:
"""Provide the ability to render AI2-THOR using Google Colab. """
# Thanks to the [Unity ML Agents team](https://github.com/Unity-Technologies/ml-agents)
# for most of this setup! :)
def progress(value):
return HTML(
f"""
            <progress value='{value}' max='100' style='width: 100%'>
{value}
</progress>
"""
)
progress_bar = display(progress(0), display_id=True)
try:
import google.colab
using_colab = True
except ImportError:
using_colab = False
if using_colab:
with open("frame-buffer", "w") as writefile:
writefile.write(
"""#taken from https://gist.github.com/jterrace/2911875
XVFB=/usr/bin/Xvfb
XVFBARGS=":1 -screen 0 1024x768x24 -ac +extension GLX +render -noreset"
PIDFILE=./frame-buffer.pid
case "$1" in
start)
/sbin/start-stop-daemon --start --quiet --pidfile $PIDFILE --make-pidfile --background --exec $XVFB -- $XVFBARGS
;;
stop)
/sbin/start-stop-daemon --stop --quiet --pidfile $PIDFILE
rm $PIDFILE
;;
restart)
$0 stop
$0 start
;;
*)
exit 1
esac
exit 0
"""
)
progress_bar.update(progress(5))
os.system("apt-get install daemon >/dev/null 2>&1")
progress_bar.update(progress(10))
os.system("apt-get install wget >/dev/null 2>&1")
progress_bar.update(progress(20))
os.system(
"wget http://ai2thor.allenai.org/ai2thor-colab/libxfont1_1.5.1-1ubuntu0.16.04.4_amd64.deb >/dev/null 2>&1"
)
progress_bar.update(progress(30))
os.system(
"wget --output-document xvfb.deb http://ai2thor.allenai.org/ai2thor-colab/xvfb_1.18.4-0ubuntu0.12_amd64.deb >/dev/null 2>&1"
)
progress_bar.update(progress(40))
os.system("dpkg -i libxfont1_1.5.1-1ubuntu0.16.04.4_amd64.deb >/dev/null 2>&1")
progress_bar.update(progress(50))
os.system("dpkg -i xvfb.deb >/dev/null 2>&1")
progress_bar.update(progress(70))
os.system("rm libxfont1_1.5.1-1ubuntu0.16.04.4_amd64.deb")
progress_bar.update(progress(80))
os.system("rm xvfb.deb")
progress_bar.update(progress(90))
os.system("bash frame-buffer start")
os.environ["DISPLAY"] = ":1"
progress_bar.update(progress(100))
|
ai2thor-colab-main
|
ai2thor_colab/__init__.py
|
allennlp-demo-main
|
api/allennlp_demo/__init__.py
|
|
allennlp-demo-main
|
api/allennlp_demo/glove_sentiment_analysis/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class GloveSentimentAnalysisModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = GloveSentimentAnalysisModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/glove_sentiment_analysis/api.py
|
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.glove_sentiment_analysis.api import GloveSentimentAnalysisModelEndpoint
class TestGloveSentimentAnalysisModelEndpoint(ModelEndpointTestCase):
endpoint = GloveSentimentAnalysisModelEndpoint()
predict_input = {"sentence": "a very well-made, funny and entertaining picture."}
|
allennlp-demo-main
|
api/allennlp_demo/glove_sentiment_analysis/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/constituency_parser/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class ConstituencyParserModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = ConstituencyParserModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/constituency_parser/api.py
|
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.constituency_parser.api import ConstituencyParserModelEndpoint
class TestConstituencyParserModelEndpoint(ModelEndpointTestCase):
endpoint = ConstituencyParserModelEndpoint()
predict_input = {"sentence": "If I bring 10 dollars tomorrow, can you buy me lunch?"}
|
allennlp-demo-main
|
api/allennlp_demo/constituency_parser/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/tasks/__init__.py
|
|
"""
The tasks endpoint lists all demo tasks and some info about them.
"""
import logging
import flask
from allennlp_demo.common.logs import configure_logging
from allennlp_models.pretrained import get_tasks
logger = logging.getLogger(__name__)
class TasksService(flask.Flask):
def __init__(self, name: str = "tasks"):
super().__init__(name)
configure_logging(self)
@self.route("/", methods=["GET"])
def tasks():
tasks = get_tasks()
return flask.jsonify(tasks)
if __name__ == "__main__":
app = TasksService()
app.run(host="0.0.0.0", port=8000)
|
allennlp-demo-main
|
api/allennlp_demo/tasks/api.py
|
from .api import TasksService
def test_tasks():
app = TasksService()
client = app.test_client()
response = client.get("/")
assert response.status_code == 200
assert len(response.json.items()) > 0
|
allennlp-demo-main
|
api/allennlp_demo/tasks/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/semantic_role_labeling/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class SrlModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = SrlModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/semantic_role_labeling/api.py
|
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.semantic_role_labeling.api import SrlModelEndpoint
class TestSrlModelEndpoint(ModelEndpointTestCase):
endpoint = SrlModelEndpoint()
predict_input = {
"sentence": "Did Uriah honestly think he could beat the game in under three hours?"
}
|
allennlp-demo-main
|
api/allennlp_demo/semantic_role_labeling/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/dependency_parser/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class DependencyParserModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = DependencyParserModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/dependency_parser/api.py
|
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.dependency_parser.api import DependencyParserModelEndpoint
class TestDependencyParserModelEndpoint(ModelEndpointTestCase):
endpoint = DependencyParserModelEndpoint()
predict_input = {"sentence": "If I bring 10 dollars tomorrow, can you buy me lunch?"}
|
allennlp-demo-main
|
api/allennlp_demo/dependency_parser/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/wikitables_parser/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class WikitablesParserModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = WikitablesParserModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/wikitables_parser/api.py
|
from allennlp_demo.wikitables_parser.api import WikitablesParserModelEndpoint
from allennlp_demo.common.testing import ModelEndpointTestCase
class TestWikitablesParserModelEndpoint(ModelEndpointTestCase):
endpoint = WikitablesParserModelEndpoint()
predict_input = {
"table": "\n".join(
[
"Season Level Division Section Position Movements",
"1993 Tier 3 Division 2 Östra Svealand 1st Promoted",
"1994 Tier 2 Division 1 Norra 11th Relegation Playoffs",
"1995 Tier 2 Division 1 Norra 4th ",
"1996 Tier 2 Division 1 Norra 11th Relegation Playoffs - Relegated",
"1997 Tier 3 Division 2 Östra Svealand 3rd ",
"1998 Tier 3 Division 2 Östra Svealand 7th ",
"1999 Tier 3 Division 2 Östra Svealand 3rd ",
"2000 Tier 3 Division 2 Östra Svealand 9th ",
"2001 Tier 3 Division 2 Östra Svealand 7th ",
"2002 Tier 3 Division 2 Östra Svealand 2nd ",
"2003 Tier 3 Division 2 Östra Svealand 3rd ",
"2004 Tier 3 Division 2 Östra Svealand 6th ",
"2005 Tier 3 Division 2 Östra Svealand 4th Promoted",
"2006* Tier 3 Division 1 Norra 5th ",
"2007 Tier 3 Division 1 Södra 14th Relegated",
]
),
"question": "What is the only season with the 1st position?",
}
|
allennlp-demo-main
|
api/allennlp_demo/wikitables_parser/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/next_token_lm/__init__.py
|
|
import os
import re
from typing import Dict, Any
from allennlp.interpret.attackers import Attacker, Hotflip
from allennlp.predictors.predictor import JsonDict
from overrides import overrides
from allennlp_demo.common import config, http
class NextTokenLmModelEndpoint(http.ModelEndpoint):
# TODO: don't hardcode this.
_END_OF_TEXT_TOKEN = "<|endoftext|>"
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
@overrides
def load_attackers(self) -> Dict[str, Attacker]:
hotflip = Hotflip(self.predictor, "gpt2")
hotflip.initialize()
return {"hotflip": hotflip}
@overrides
def predict(self, inputs: JsonDict) -> JsonDict:
# We override this to do a little extra sanitization on the inputs.
# In particular, we strip any trailing whitespace (except for newlines,
# which are most likely intentional) and remove any double spaces, since
# these things result in poor predictions.
inputs["sentence"] = self._sanitize_input_text(inputs["sentence"])
outputs = self.predictor.predict_json(inputs)
# We also do some final sanitization on the outputs to remove the '<|endoftext|>' token
# and filter out any predicted sequences that are empty, i.e. just equal to the
# '<|endoftext|>' token repeated.
return self._sanitize_outputs(outputs)
@staticmethod
def _sanitize_input_text(sentence: str) -> str:
return re.sub(r" +", " ", sentence.rstrip(" \t\r"))
def _sanitize_outputs(self, output: Dict[str, Any]) -> Dict[str, Any]:
sanitized_top_tokens = []
sanitized_top_indices = []
sanitized_probabilities = []
for tokens, indices, probability in zip(
output["top_tokens"], output["top_indices"], output["probabilities"]
):
filtered = [(t, i) for (t, i) in zip(tokens, indices) if t != self._END_OF_TEXT_TOKEN]
if not filtered:
continue
new_tokens, new_indices = zip(*filtered)
sanitized_top_tokens.append(list(new_tokens))
sanitized_top_indices.append(list(new_indices))
sanitized_probabilities.append(probability)
output["top_tokens"] = sanitized_top_tokens
output["top_indices"] = sanitized_top_indices
output["probabilities"] = sanitized_probabilities
return output
if __name__ == "__main__":
endpoint = NextTokenLmModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/next_token_lm/api.py
|
import pytest
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.next_token_lm.api import NextTokenLmModelEndpoint
class TestNextTokenLmModelEndpoint(ModelEndpointTestCase):
endpoint = NextTokenLmModelEndpoint()
predict_input = {"sentence": "AlleNLP is a"}
@pytest.mark.parametrize(
"input_text, result",
[
(
"AllenNLP is one of the most popular ",
"AllenNLP is one of the most popular",
),
("AllenNLP is a framework.\n", "AllenNLP is a framework.\n"),
("AllenNLP is a framework.\t", "AllenNLP is a framework."),
],
)
def test_sanitize_input_text(self, input_text: str, result: str):
assert self.endpoint._sanitize_input_text(input_text) == result
@pytest.mark.parametrize(
"output, result",
[
(
{
"top_indices": [
[50256, 50256, 50256, 50256, 50256],
[679, 318, 262, 1772, 286],
[679, 318, 257, 2888, 286],
[679, 318, 635, 257, 2888],
[679, 318, 635, 262, 1772],
],
"probabilities": [
0.1311192363500595,
0.00410857517272234,
0.002775674918666482,
0.0008492876659147441,
0.00035627084434963763,
],
"top_tokens": [
[
"<|endoftext|>",
"<|endoftext|>",
"<|endoftext|>",
"<|endoftext|>",
"<|endoftext|>",
],
["ĠHe", "Ġis", "Ġthe", "Ġauthor", "Ġof"],
["ĠHe", "Ġis", "Ġa", "Ġmember", "Ġof"],
["ĠHe", "Ġis", "Ġalso", "Ġa", "Ġmember"],
["ĠHe", "Ġis", "Ġalso", "Ġthe", "Ġauthor"],
],
},
{
"top_indices": [
[679, 318, 262, 1772, 286],
[679, 318, 257, 2888, 286],
[679, 318, 635, 257, 2888],
[679, 318, 635, 262, 1772],
],
"probabilities": [
0.00410857517272234,
0.002775674918666482,
0.0008492876659147441,
0.00035627084434963763,
],
"top_tokens": [
["ĠHe", "Ġis", "Ġthe", "Ġauthor", "Ġof"],
["ĠHe", "Ġis", "Ġa", "Ġmember", "Ġof"],
["ĠHe", "Ġis", "Ġalso", "Ġa", "Ġmember"],
["ĠHe", "Ġis", "Ġalso", "Ġthe", "Ġauthor"],
],
},
)
],
)
def test_sanitize_outputs(self, output, result):
assert self.endpoint._sanitize_outputs(output) == result
|
allennlp-demo-main
|
api/allennlp_demo/next_token_lm/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/elmo_snli/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class ElmoSnliModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = ElmoSnliModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/elmo_snli/api.py
|
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.elmo_snli.api import ElmoSnliModelEndpoint
class TestElmoSnliModelEndpoint(ModelEndpointTestCase):
endpoint = ElmoSnliModelEndpoint()
predict_input = {
"hypothesis": "Two women are sitting on a blanket near some rocks talking about politics.",
"premise": "Two women are wandering along the shore drinking iced tea.",
}
|
allennlp-demo-main
|
api/allennlp_demo/elmo_snli/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/fine_grained_ner/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class FineGrainedNerModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = FineGrainedNerModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/fine_grained_ner/api.py
|
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.fine_grained_ner.api import FineGrainedNerModelEndpoint
class TestFineGrainedNerModelEndpoint(ModelEndpointTestCase):
endpoint = FineGrainedNerModelEndpoint()
predict_input = {
"sentence": "Did Uriah honestly think he could beat The Legend of Zelda in under three hours?"
}
|
allennlp-demo-main
|
api/allennlp_demo/fine_grained_ner/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/naqanet/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class NAQANetModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = NAQANetModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/naqanet/api.py
|
from overrides import overrides
from allennlp_demo.common.testing import RcModelEndpointTestCase
from allennlp_demo.naqanet.api import NAQANetModelEndpoint
class TestNAQANetModelEndpoint(RcModelEndpointTestCase):
endpoint = NAQANetModelEndpoint()
@overrides
def check_predict_result(self, result):
assert "answer" in result
assert len(result["passage_question_attention"]) > 0
|
allennlp-demo-main
|
api/allennlp_demo/naqanet/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/adversarial_binary_gender_bias_mitigated_roberta_snli/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class AdversarialBinaryGenderBiasMitigatedRobertaSnliModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = AdversarialBinaryGenderBiasMitigatedRobertaSnliModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/adversarial_binary_gender_bias_mitigated_roberta_snli/api.py
|
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.adversarial_binary_gender_bias_mitigated_roberta_snli.api import (
AdversarialBinaryGenderBiasMitigatedRobertaSnliModelEndpoint,
)
class TestAdversarialBinaryGenderBiasMitigatedRobertaSnliModelEndpoint(ModelEndpointTestCase):
endpoint = AdversarialBinaryGenderBiasMitigatedRobertaSnliModelEndpoint()
predict_input = {
"premise": "An accountant can afford a computer.",
"hypothesis": "A gentleman can afford a computer.",
}
|
allennlp-demo-main
|
api/allennlp_demo/adversarial_binary_gender_bias_mitigated_roberta_snli/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/lerc/__init__.py
|
|
import os
from allennlp.common.util import import_module_and_submodules
from allennlp_demo.common import config, http
class LERCModelEndpoint(http.ModelEndpoint):
def __init__(self):
import_module_and_submodules("lerc")
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = LERCModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/lerc/api.py
|
import math
from overrides import overrides
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.lerc.api import LERCModelEndpoint
class TestLERCModelEndpoint(ModelEndpointTestCase):
endpoint = LERCModelEndpoint()
predict_input = {
"context": "Robin wept in front of Skylar who was sick of seeing her cry.",
"question": "What will happen to Robin?",
"reference": "be scolded",
"candidate": "be sad",
}
@overrides
def check_predict_result(self, result):
print(result)
assert math.isclose(result["pred_score"], 0.223822, abs_tol=1e-5)
|
allennlp-demo-main
|
api/allennlp_demo/lerc/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/masked_lm/__init__.py
|
|
import os
from typing import Dict
from allennlp.interpret.attackers import Attacker, Hotflip
from allennlp_demo.common import config, http
class MaskedLmModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
def load_attackers(self) -> Dict[str, Attacker]:
hotflip = Hotflip(self.predictor, "bert")
hotflip.initialize()
return {"hotflip": hotflip}
if __name__ == "__main__":
endpoint = MaskedLmModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/masked_lm/api.py
|
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.masked_lm.api import MaskedLmModelEndpoint
class TestMaskedLmModelEndpoint(ModelEndpointTestCase):
endpoint = MaskedLmModelEndpoint()
predict_input = {"sentence": "The doctor ran to the emergency room to see [MASK] patient."}
|
allennlp-demo-main
|
api/allennlp_demo/masked_lm/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/transformer_qa/__init__.py
|
|
import os
from allennlp.common.util import JsonDict
from allennlp_demo.common import config, http
class TransformerQaModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
def predict(self, inputs: JsonDict):
# For compatibility with other RC models.
if "passage" in inputs:
inputs["context"] = inputs.pop("passage")
return super().predict(inputs)
def load_interpreters(self):
# The interpreters don't work with this model right now.
return {}
def load_attackers(self):
# The attackers don't work with this model right now.
return {}
if __name__ == "__main__":
endpoint = TransformerQaModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/transformer_qa/api.py
|
from allennlp_demo.common.testing import RcModelEndpointTestCase
from allennlp_demo.transformer_qa.api import TransformerQaModelEndpoint
class TestTransformerQaModelEndpoint(RcModelEndpointTestCase):
endpoint = TransformerQaModelEndpoint()
|
allennlp-demo-main
|
api/allennlp_demo/transformer_qa/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/named_entity_recognition/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class NerModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = NerModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/named_entity_recognition/api.py
|
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.named_entity_recognition.api import NerModelEndpoint
class TestNerModelEndpoint(ModelEndpointTestCase):
endpoint = NerModelEndpoint()
predict_input = {
"sentence": "Did Uriah honestly think he could beat The Legend of Zelda in under three hours?"
}
|
allennlp-demo-main
|
api/allennlp_demo/named_entity_recognition/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/roberta_mnli/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class RobertaMnliModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = RobertaMnliModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/roberta_mnli/api.py
|
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.roberta_mnli.api import RobertaMnliModelEndpoint
class TestRobertaMnliModelEndpoint(ModelEndpointTestCase):
endpoint = RobertaMnliModelEndpoint()
predict_input = {
"hypothesis": "Two women are sitting on a blanket near some rocks talking about politics.",
"premise": "Two women are wandering along the shore drinking iced tea.",
}
|
allennlp-demo-main
|
api/allennlp_demo/roberta_mnli/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/bidaf/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class BidafModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = BidafModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/bidaf/api.py
|
from allennlp_demo.common.testing import RcModelEndpointTestCase
from allennlp_demo.bidaf.api import BidafModelEndpoint
class TestBidafModelEndpoint(RcModelEndpointTestCase):
endpoint = BidafModelEndpoint()
|
allennlp-demo-main
|
api/allennlp_demo/bidaf/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/roberta_sentiment_analysis/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class RobertaSentimentAnalysisModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = RobertaSentimentAnalysisModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/roberta_sentiment_analysis/api.py
|
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.roberta_sentiment_analysis.api import RobertaSentimentAnalysisModelEndpoint
class TestRobertaSentimentAnalysisModelEndpoint(ModelEndpointTestCase):
endpoint = RobertaSentimentAnalysisModelEndpoint()
predict_input = {"sentence": "a very well-made, funny and entertaining picture."}
|
allennlp-demo-main
|
api/allennlp_demo/roberta_sentiment_analysis/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/model_cards/__init__.py
|
|
import logging
import flask
from typing import Dict
from allennlp_demo.common.logs import configure_logging
from allennlp_models.pretrained import get_pretrained_models
logger = logging.getLogger(__name__)
class ModelCardsService(flask.Flask):
def __init__(self, name: str = "model-cards"):
super().__init__(name)
configure_logging(self)
# We call this once and cache the results. It takes a little memory (~4 MB) but makes
# everything a lot faster.
self.cards_by_id = get_pretrained_models()
@self.route("/", methods=["GET"])
def all_model_cards():
cards: Dict[str, Dict] = {}
for id, card in self.cards_by_id.items():
cards[id] = card.to_dict()
return flask.jsonify(cards)
if __name__ == "__main__":
app = ModelCardsService()
app.run(host="0.0.0.0", port=8000)
|
allennlp-demo-main
|
api/allennlp_demo/model_cards/api.py
|
from .api import ModelCardsService
def test_model_cards():
app = ModelCardsService()
client = app.test_client()
response = client.get("/")
assert response.status_code == 200
assert len(response.json) > 0
bidaf = response.json.get("rc-bidaf")
assert bidaf is not None
assert bidaf.get("display_name") == "BiDAF"
assert bidaf.get("contact") == "[email protected]"
|
allennlp-demo-main
|
api/allennlp_demo/model_cards/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/bidaf_elmo/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class BidafElmoModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = BidafElmoModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/bidaf_elmo/api.py
|
from overrides import overrides
import pytest
from allennlp_demo.bidaf_elmo.api import BidafElmoModelEndpoint
from allennlp_demo.common.testing import RcModelEndpointTestCase
class TestBidafElmoModelEndpoint(RcModelEndpointTestCase):
endpoint = BidafElmoModelEndpoint()
@pytest.mark.skip("Takes too long")
@overrides
def test_interpret(self):
pass
@pytest.mark.skip("Takes too long")
@overrides
def test_attack(self):
pass
|
allennlp-demo-main
|
api/allennlp_demo/bidaf_elmo/test_api.py
|
import json
from dataclasses import dataclass, field
from typing import Dict, Any, Optional, List
from allennlp.predictors import Predictor
VALID_ATTACKERS = ["hotflip", "input_reduction"]
VALID_INTERPRETERS = ["simple_gradient", "smooth_gradient", "integrated_gradient"]
@dataclass(frozen=True)
class Model:
"""
Class capturing the options we support per model.
"""
id: str
"""
A unique name to identify each demo.
"""
archive_file: str
"""
The path to the model's archive_file.
"""
pretrained_model_id: Optional[str] = None
"""
The ID of a pretrained model to use from `allennlp_models.pretrained`.
"""
predictor_name: Optional[str] = None
"""
Optional predictor name to override the default predictor associated with the archive.
This is ignored if `pretrained_model_id` is given.
"""
overrides: Optional[Dict[str, Any]] = None
"""
Optional parameter overrides to pass through when loading the archive.
This is ignored if `pretrained_model_id` is given.
"""
attackers: List[str] = field(default_factory=lambda: VALID_ATTACKERS)
"""
List of valid attackers to use.
"""
interpreters: List[str] = field(default_factory=lambda: VALID_INTERPRETERS)
"""
List of valid interpreters to use.
"""
use_old_load_method: bool = False
"""
Some models that run on older versions need to be loaded differently.
"""
@classmethod
def from_file(cls, path: str) -> "Model":
with open(path, "r") as fh:
raw = json.load(fh)
if "pretrained_model_id" in raw:
from allennlp_models.pretrained import get_pretrained_models
model_card = get_pretrained_models()[raw["pretrained_model_id"]]
raw["archive_file"] = model_card.model_usage.archive_file
raw["predictor_name"] = model_card.registered_predictor_name
out = cls(**raw)
# Do some validation.
for attacker in out.attackers:
assert attacker in VALID_ATTACKERS, f"invalid attacker {attacker}"
for interpreter in out.interpreters:
assert interpreter in VALID_INTERPRETERS, f"invalid interpreter {interpreter}"
if out.use_old_load_method:
assert out.pretrained_model_id is None
return out
def load_predictor(self) -> Predictor:
if self.pretrained_model_id is not None:
from allennlp_models.pretrained import load_predictor
return load_predictor(self.pretrained_model_id, overrides=self.overrides)
assert self.archive_file is not None
if self.use_old_load_method:
from allennlp.models.archival import load_archive
# Older versions require overrides to be passed as a JSON string.
o = json.dumps(self.overrides) if self.overrides is not None else None
archive = load_archive(self.archive_file, overrides=o)
return Predictor.from_archive(archive, self.predictor_name)
return Predictor.from_path(
self.archive_file, predictor_name=self.predictor_name, overrides=self.overrides
)
|
allennlp-demo-main
|
api/allennlp_demo/common/config.py
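For reference, `Model.from_file` above reads a small `model.json` that sits next to each endpoint's `api.py`. The sketch below shows a minimal configuration it could parse; the id, archive URL, and chosen attackers/interpreters are invented for illustration and are not taken from any endpoint in this repo. It assumes the `allennlp_demo` package and its AllenNLP dependencies are importable.
# A minimal sketch of a model.json that Model.from_file could load, assuming only the
# dataclass fields defined above. The id and archive URL are placeholders, not values
# used by any real demo endpoint.
import json
import tempfile

from allennlp_demo.common.config import Model

raw = {
    "id": "my-demo-model",                               # hypothetical demo id
    "archive_file": "https://example.com/model.tar.gz",  # placeholder archive location
    "attackers": ["hotflip"],                            # must be a subset of VALID_ATTACKERS
    "interpreters": ["simple_gradient"],                 # must be a subset of VALID_INTERPRETERS
}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
    json.dump(raw, fh)
    path = fh.name

# Parses and validates the config; it does not download the (fake) archive, which
# only happens later when load_predictor() is called.
model = Model.from_file(path)
print(model.id, model.attackers, model.interpreters)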
|
allennlp-demo-main
|
api/allennlp_demo/common/__init__.py
|
|
import os
import sys
import json
import logging
import time
from typing import Mapping
from flask import Flask, request, Response, g
from dataclasses import dataclass, asdict
from typing import Optional
@dataclass(frozen=True)
class RequestLogEntry:
status: int
method: str
path: str
query: dict
request_data: Optional[dict]
response_data: Optional[dict]
ip: str
forwarded_for: Optional[str]
latency_ms: float
cached: bool
class JsonLogFormatter(logging.Formatter):
"""
Outputs JSON logs with a structure that works well with Google Cloud Logging.
"""
def format(self, r: logging.LogRecord) -> str:
# Exceptions get special handling.
if r.exc_info is not None:
# In development we just return the exception with the default formatting, as this
# is easiest for the end user.
if os.getenv("FLASK_ENV") == "development":
return super().format(r)
# Otherwise we still output them as JSON
m = r.getMessage() % r.__dict__
return json.dumps(
{
"logname": r.name,
"severity": r.levelname,
"message": m,
"exception": self.formatException(r.exc_info),
"stack": self.formatStack(r.stack_info),
}
)
if isinstance(r.msg, Mapping):
return json.dumps({"logname": r.name, "severity": r.levelname, **r.msg})
else:
m = r.getMessage() % r.__dict__
return json.dumps({"logname": r.name, "severity": r.levelname, "message": m})
def configure_logging(app: Flask, log_payloads: bool = False):
"""
Set up logging in a way that makes sense for demo API endpoints.
"""
# Reduce chatter from AllenNLP
logging.getLogger("allennlp").setLevel(logging.WARN)
# Output logs as JSON
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(JsonLogFormatter())
logging.basicConfig(level=os.getenv("LOG_LEVEL", logging.INFO), handlers=[handler])
# Disable the default request log, as we add our own
logging.getLogger("werkzeug").setLevel(logging.WARN)
# Capture when a request is received so that we can keep track of how long it took to process.
@app.before_request
def capture_start_time() -> None:
g.start = time.perf_counter()
# Output a request log of our own with the information we're interested in.
@app.after_request
def log_request(r: Response) -> Response:
latency_ms = (time.perf_counter() - g.start) * 1000
rl = RequestLogEntry(
r.status_code,
request.method,
request.path,
request.args,
None if not log_payloads else request.get_json(silent=True),
None if not log_payloads else r.get_json(silent=True),
request.remote_addr,
request.headers.get("X-Forwarded-For"),
latency_ms,
r.headers.get("X-Cache-Hit", "0") == "1",
)
logging.getLogger("request").info(asdict(rl))
return r
|
allennlp-demo-main
|
api/allennlp_demo/common/logs.py
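To make the logging setup above concrete, here is a minimal sketch of wiring `configure_logging` into a bare Flask app. The app name and the `/ping` route are invented for illustration; the shape of the emitted JSON follows `RequestLogEntry` and `JsonLogFormatter` above.
# A minimal sketch using configure_logging from logs.py above; the /ping route is
# made up for illustration.
from flask import Flask, jsonify

from allennlp_demo.common.logs import configure_logging

app = Flask("logging-sketch")
configure_logging(app, log_payloads=False)


@app.route("/ping")
def ping():
    # Each request produces one JSON line on stdout from the "request" logger, roughly:
    # {"logname": "request", "severity": "INFO", "status": 200, "method": "GET",
    #  "path": "/ping", "latency_ms": ..., ...}
    return jsonify({"ok": True})


if __name__ == "__main__":
    app.run(port=8000)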
|
from functools import lru_cache
from dataclasses import asdict
import json
from typing import Callable, Dict
from flask import Flask, Request, Response, after_this_request, request, jsonify
from allennlp.version import VERSION
from allennlp.predictors.predictor import JsonDict
from allennlp.interpret.saliency_interpreters import (
SaliencyInterpreter,
SimpleGradient,
SmoothGradient,
IntegratedGradient,
)
from allennlp.interpret.attackers import Attacker, Hotflip, InputReduction
from allennlp_demo.common import config
from allennlp_demo.common.logs import configure_logging
def no_cache(request: Request) -> bool:
"""
Returns True if the "no_cache" query string argument is present in the provided request.
This provides a consistent mechanism across all endpoints for disabling the cache.
"""
return "no_cache" in request.args
def with_cache_hit_response_headers(fn: Callable, *args):
"""
Calls the provided function with the given arguments and returns the results. If the results
are served from the cache, an HTTP header is added to the response.
The provided function must be memoized using the functools.lru_cache decorator.
"""
# This allows us to determine if the response we're serving was cached. It's safe to
# do because we use a single-threaded server.
pre_hits = fn.cache_info().hits # type: ignore
r = fn(*args)
# If it was a cache hit, add an HTTP header to the response
if fn.cache_info().hits - pre_hits == 1: # type: ignore
@after_this_request
def add_header(resp: Response) -> Response:
resp.headers["X-Cache-Hit"] = "1"
return resp
return r
class NotFoundError(RuntimeError):
pass
class UnknownInterpreterError(NotFoundError):
def __init__(self, interpreter_id: str):
super().__init__(f"No interpreter with id '{interpreter_id}'")
class InvalidInterpreterError(NotFoundError):
def __init__(self, interpreter_id: str):
super().__init__(f"Interpreter with id '{interpreter_id}' is not supported for this model")
class UnknownAttackerError(NotFoundError):
def __init__(self, attacker_id: str):
super().__init__(f"No attacker with id '{attacker_id}'")
class InvalidAttackerError(NotFoundError):
def __init__(self, attacker_id: str):
super().__init__(f"Attacker with id '{attacker_id}' is not supported for this model")
class ModelEndpoint:
"""
Class capturing a single model endpoint, which provides an HTTP API suitable for use by
the AllenNLP demo.
This class can be extended to implement custom functionality.
"""
def __init__(self, model: config.Model, log_payloads: bool = False):
self.model = model
self.app = Flask(model.id)
self.configure_logging(log_payloads)
self.predictor = model.load_predictor()
self.interpreters = self.load_interpreters()
self.attackers = self.load_attackers()
self.configure_error_handling()
# By creating the LRU caches when the class is instantiated, we can
# be sure that the caches are specific to the instance, and not the class,
# i.e. every instance will have its own set of caches.
@lru_cache(maxsize=1024)
def predict_with_cache(inputs: str) -> JsonDict:
return self.predict(json.loads(inputs))
@lru_cache(maxsize=1024)
def interpret_with_cache(interpreter_id: str, inputs: str) -> JsonDict:
return self.interpret(interpreter_id, json.loads(inputs))
@lru_cache(maxsize=1024)
def attack_with_cache(attacker_id: str, attack: str) -> JsonDict:
return self.attack(attacker_id, json.loads(attack))
self.predict_with_cache = predict_with_cache
self.interpret_with_cache = interpret_with_cache
self.attack_with_cache = attack_with_cache
self.setup_routes()
def load_interpreters(self) -> Dict[str, SaliencyInterpreter]:
"""
Returns a mapping of interpreters keyed by a unique identifier. Requests to
`/interpret/:id` will invoke the interpreter with the provided `:id`. Override this method
to add or remove interpreters.
"""
interpreters: Dict[str, SaliencyInterpreter] = {}
if "simple_gradient" in self.model.interpreters:
interpreters["simple_gradient"] = SimpleGradient(self.predictor)
if "smooth_gradient" in self.model.interpreters:
interpreters["smooth_gradient"] = SmoothGradient(self.predictor)
if "integrated_gradient" in self.model.interpreters:
interpreters["integrated_gradient"] = IntegratedGradient(self.predictor)
return interpreters
def load_attackers(self) -> Dict[str, Attacker]:
"""
Returns a mapping of attackers keyed by a unique identifier. Requests to `/attack/:id`
will invoke the attacker with the provided `:id`. Override this method to add or remove
attackers.
"""
attackers: Dict[str, Attacker] = {}
if "hotflip" in self.model.attackers:
hotflip = Hotflip(self.predictor)
hotflip.initialize()
attackers["hotflip"] = hotflip
if "input_reduction" in self.model.attackers:
attackers["input_reduction"] = InputReduction(self.predictor)
return attackers
def info(self) -> str:
"""
Returns basic information about the model and the version of AllenNLP.
"""
return jsonify({**asdict(self.model), "allennlp": VERSION})
def predict(self, inputs: JsonDict) -> JsonDict:
"""
Returns predictions.
"""
return self.predictor.predict_json(inputs)
def interpret(self, interpreter_id: str, inputs: JsonDict) -> JsonDict:
"""
Interprets the output of a predictor and assigns saliency scores to each input, so as to find
the inputs that would change the model's prediction in some desired manner.
"""
if interpreter_id not in config.VALID_INTERPRETERS:
raise UnknownInterpreterError(interpreter_id)
interp = self.interpreters.get(interpreter_id)
if interp is None:
raise InvalidInterpreterError(interpreter_id)
return interp.saliency_interpret_from_json(inputs)
def attack(self, attacker_id: str, attack: JsonDict) -> JsonDict:
"""
Modifies the input (e.g. by adding or removing tokens) to try to change the model's prediction
in some desired manner.
"""
if attacker_id not in config.VALID_ATTACKERS:
raise UnknownAttackerError(attacker_id)
attacker = self.attackers.get(attacker_id)
if attacker is None:
raise InvalidAttackerError(attacker_id)
return attacker.attack_from_json(**attack)
def configure_logging(self, log_payloads: bool = False) -> None:
configure_logging(self.app, log_payloads=log_payloads)
def configure_error_handling(self) -> None:
def handle_invalid_json(err: json.JSONDecodeError):
return jsonify({"error": str(err)}), 400
self.app.register_error_handler(json.JSONDecodeError, handle_invalid_json)
def handle_404(err: NotFoundError):
return jsonify({"error": str(err)}), 404
self.app.register_error_handler(NotFoundError, handle_404)
def setup_routes(self) -> None:
"""
Binds HTTP routes to the operations supported by a standard model endpoint. You can override this
method to define additional routes or change the default ones.
"""
@self.app.route("/")
def info_handler():
return self.info()
# No-op POST for image uploads: we need an endpoint, but we don't need to save the image
@self.app.route("/noop", methods=["POST"])
def noop():
return ""
@self.app.route("/predict", methods=["POST"])
def predict_handler():
if no_cache(request):
return jsonify(self.predict(request.get_json()))
return jsonify(with_cache_hit_response_headers(self.predict_with_cache, request.data))
@self.app.route("/interpret/<string:interpreter_id>", methods=["POST"])
def interpret_handler(interpreter_id: str):
if no_cache(request):
return jsonify(self.interpret(interpreter_id, request.get_json()))
return jsonify(
with_cache_hit_response_headers(
self.interpret_with_cache, interpreter_id, request.data
)
)
@self.app.route("/attack/<string:attacker_id>", methods=["POST"])
def attack_handler(attacker_id: str):
if no_cache(request):
return jsonify(self.attack(attacker_id, request.get_json()))
return jsonify(
with_cache_hit_response_headers(self.attack_with_cache, attacker_id, request.data)
)
def run(self, port: int = 8000) -> None:
# For simplicity, we use Flask's built-in server. This isn't recommended, per:
# https://flask.palletsprojects.com/en/1.1.x/tutorial/deploy/#run-with-a-production-server
#
# That said we think this is preferable because:
# - It's simple. No need to install another WSGI server and add logic for enabling it in
# the right context.
# - Our workload is CPU bound, so event loop based WSGI servers don't get us much.
# - We use Kubernetes to scale horizontally, and run an NGINX proxy at the front-door,
# which adds the resiliency and other things we need for production.
self.app.run(host="0.0.0.0", port=port)
|
allennlp-demo-main
|
api/allennlp_demo/common/http.py
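As a usage note for `ModelEndpoint` above, the sketch below drives the routes that `setup_routes` registers, reusing the NER endpoint defined earlier in this repo. Constructing the endpoint downloads and loads the model archive, so this assumes network access and the demo's dependencies; the example sentence and the choice of `simple_gradient` are illustrative, and assume the endpoint's model.json keeps the default interpreter list.
# A rough sketch of exercising a ModelEndpoint through its Flask test client.
from allennlp_demo.named_entity_recognition.api import NerModelEndpoint

endpoint = NerModelEndpoint()
client = endpoint.app.test_client()

# Health check / model info route.
print(client.get("/").json["id"])

payload = {"sentence": "AllenNLP was built in Seattle."}

# The first call computes the prediction; the identical second call is served from the
# per-instance LRU cache and carries the X-Cache-Hit header added above.
first = client.post("/predict", json=payload)
second = client.post("/predict", json=payload)
print(second.headers.get("X-Cache-Hit"))  # expected to be "1", per the test cases below

# Saliency interpretation, keyed by the ids returned from load_interpreters().
resp = client.post("/interpret/simple_gradient", json=payload)
print(resp.status_code)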
|
from overrides import overrides
from allennlp_demo.common.http import ModelEndpoint
from allennlp_demo.common.testing.model_endpoint_test_case import ModelEndpointTestCase
class RcModelEndpointTestCase(ModelEndpointTestCase):
"""
Provides a solid set of test methods for RC models. Individual methods can be overridden
as necessary.
"""
endpoint: ModelEndpoint
predict_input = {
"passage": (
"A reusable launch system (RLS, or reusable launch vehicle, RLV) "
"is a launch system which is capable of launching a payload into "
"space more than once. This contrasts with expendable launch systems, "
"where each launch vehicle is launched once and then discarded. "
"No completely reusable orbital launch system has ever been created. "
"Two partially reusable launch systems were developed, the "
"Space Shuttle and Falcon 9. The Space Shuttle was partially reusable: "
"the orbiter (which included the Space Shuttle main engines and the "
"Orbital Maneuvering System engines), and the two solid rocket boosters "
"were reused after several months of refitting work for each launch. "
"The external tank was discarded after each flight."
),
"question": "How many partially reusable launch systems were developed?",
}
@overrides
def check_predict_result(self, result):
assert len(result["best_span"]) > 0
assert len(result["best_span_str"].strip()) > 0
@overrides
def test_interpret(self):
for interpreter_id in self.interpreter_ids():
resp = self.client.post(
f"/interpret/{interpreter_id}",
query_string={"no_cache": True},
json=self.predict_input,
)
assert resp.status_code == 200
assert resp.json is not None
assert len(resp.json["instance_1"]) > 0
assert len(resp.json["instance_1"]["grad_input_1"]) > 0
assert len(resp.json["instance_1"]["grad_input_2"]) > 0
@overrides
def test_attack(self):
data = {
"inputs": self.predict_input,
"input_field_to_attack": "question",
"grad_input_field": "grad_input_2",
}
for attacker_id in self.attacker_ids():
resp = self.client.post(
f"/attack/{attacker_id}", json=data, query_string={"no_cache": True}
)
assert resp.status_code == 200
assert len(resp.json["final"]) > 0
assert len(resp.json["original"]) > 0
|
allennlp-demo-main
|
api/allennlp_demo/common/testing/rc.py
|
from allennlp_demo.common.testing.model_endpoint_test_case import ModelEndpointTestCase
from allennlp_demo.common.testing.rc import RcModelEndpointTestCase
|
allennlp-demo-main
|
api/allennlp_demo/common/testing/__init__.py
|
import os
from pathlib import Path
from typing import Optional, Any, Dict, List
from flask.testing import FlaskClient
from flask.wrappers import Response
from allennlp_demo.common.http import ModelEndpoint
from allennlp_demo.common.config import VALID_ATTACKERS, VALID_INTERPRETERS
class ModelEndpointTestCase:
"""
A base class to run model endpoint tests.
Each endpoint should have a corresponding test class that derives from this.
"""
PROJECT_ROOT: Path = (Path(__file__).parent / os.pardir / os.pardir / os.pardir).resolve()
"""
This points to the absolute path of `api/`. So, for example, this file is located
in `PROJECT_ROOT / allennlp_demo / common / testing`.
"""
endpoint: ModelEndpoint
"""
Should be defined by each subclass.
"""
predict_input: Dict[str, Any]
"""
Payload to send to the /predict route.
"""
_client: Optional[FlaskClient] = None
@property
def client(self) -> FlaskClient:
"""
Provides a Flask test client that you can use to send requests to the model endpoint.
"""
if self._client is None:
self._client = self.endpoint.app.test_client()
return self._client
@classmethod
def setup_class(cls):
pass
@classmethod
def teardown_class(cls):
pass
def setup_method(self):
# Clear the caches before each call.
self.endpoint.predict_with_cache.cache_clear()
self.endpoint.interpret_with_cache.cache_clear()
self.endpoint.attack_with_cache.cache_clear()
def teardown_method(self):
pass
def interpreter_ids(self) -> List[str]:
return list(self.endpoint.interpreters.keys())
def attacker_ids(self) -> List[str]:
return list(self.endpoint.attackers.keys())
def test_predict(self):
"""
Test the /predict route.
"""
response = self.client.post("/predict", json=self.predict_input)
self.check_response_okay(response, cache_hit=False)
self.check_predict_result(response.json)
response = self.client.post("/predict", json=self.predict_input)
self.check_response_okay(response, cache_hit=True)
self.check_predict_result(response.json)
response = self.client.post(
"/predict", query_string={"no_cache": True}, json=self.predict_input
)
self.check_response_okay(response, cache_hit=False)
self.check_predict_result(response.json)
def test_predict_invalid_input(self):
"""
Ensure a 400 is returned when bad input is given to the /predict route.
"""
response = self.client.post(
"/predict", data="{ invalid: json }", headers={"Content-Type": "application/json"}
)
assert response.status_code == 400
def test_info(self):
"""
Ensure the `/` info / health check route works.
"""
response = self.client.get("/")
self.check_response_okay(response)
def check_response_okay(self, response: Response, cache_hit: bool = False) -> None:
"""
Ensure the response from a route is okay.
"""
assert response.status_code == 200
assert response.json is not None
if not cache_hit:
assert "X-Cache-Hit" not in response.headers
else:
assert response.headers["X-Cache-Hit"] == "1"
def check_predict_result(self, result: Dict[str, Any]) -> None:
"""
Subclasses can override this method to run additional checks on the JSON
result of the /predict route.
"""
pass
def test_interpret(self) -> None:
"""
Subclasses can override this method to test interpret functionality.
"""
pass
def test_attack(self) -> None:
"""
Subclasses can override this method to test attack functionality.
"""
pass
def test_unknown_interpreter_id(self):
resp = self.client.post("/interpret/invalid", json={})
assert resp.status_code == 404
assert resp.json["error"] == "No interpreter with id 'invalid'"
def test_unknown_attacker_id(self):
resp = self.client.post("/attack/invalid", json={})
assert resp.status_code == 404
assert resp.json["error"] == "No attacker with id 'invalid'"
def test_invalid_interpreter_id(self):
for interpreter_id in VALID_INTERPRETERS:
if interpreter_id not in self.interpreter_ids():
resp = self.client.post(f"/interpret/{interpreter_id}", json={})
assert resp.status_code == 404
assert (
resp.json["error"]
== f"Interpreter with id '{interpreter_id}' is not supported for this model"
), resp.json["error"]
def test_invalid_attacker_id(self):
for attacker_id in VALID_ATTACKERS:
if attacker_id not in self.attacker_ids():
resp = self.client.post(f"/attack/{attacker_id}", json={})
assert resp.status_code == 404
assert (
resp.json["error"]
== f"Attacker with id '{attacker_id}' is not supported for this model"
), resp.json["error"]
|
allennlp-demo-main
|
api/allennlp_demo/common/testing/model_endpoint_test_case.py
|
allennlp-demo-main
|
api/allennlp_demo/open_information_extraction/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class OpenInformationExtractionModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = OpenInformationExtractionModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/open_information_extraction/api.py
|
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.open_information_extraction.api import OpenInformationExtractionModelEndpoint
class TestOpenInformationExtractionModelEndpoint(ModelEndpointTestCase):
endpoint = OpenInformationExtractionModelEndpoint()
predict_input = {"sentence": "In December, John decided to join the party."}
|
allennlp-demo-main
|
api/allennlp_demo/open_information_extraction/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/nlvr_parser/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class NlvrParserModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = NlvrParserModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/nlvr_parser/api.py
|
from allennlp_demo.nlvr_parser.api import NlvrParserModelEndpoint
from allennlp_demo.common.testing import ModelEndpointTestCase
class TestNlvrParserModelEndpoint(ModelEndpointTestCase):
endpoint = NlvrParserModelEndpoint()
predict_input = {
"sentence": "there is exactly one yellow object touching the edge",
"structured_rep": [
[
{"y_loc": 13, "type": "square", "color": "Yellow", "x_loc": 13, "size": 20},
{"y_loc": 20, "type": "triangle", "color": "Yellow", "x_loc": 44, "size": 30},
{"y_loc": 90, "type": "circle", "color": "#0099ff", "x_loc": 52, "size": 10},
],
[
{"y_loc": 57, "type": "square", "color": "Black", "x_loc": 17, "size": 20},
{"y_loc": 30, "type": "circle", "color": "#0099ff", "x_loc": 76, "size": 10},
{"y_loc": 12, "type": "square", "color": "Black", "x_loc": 35, "size": 10},
],
[
{"y_loc": 40, "type": "triangle", "color": "#0099ff", "x_loc": 26, "size": 20},
{"y_loc": 70, "type": "triangle", "color": "Black", "x_loc": 70, "size": 30},
{"y_loc": 19, "type": "square", "color": "Black", "x_loc": 35, "size": 10},
],
],
}
|
allennlp-demo-main
|
api/allennlp_demo/nlvr_parser/test_api.py
|
allennlp-demo-main
|
api/allennlp_demo/atis_parser/__init__.py
|
|
import os
from allennlp_demo.common import config, http
class AtisParserModelEndpoint(http.ModelEndpoint):
def __init__(self):
c = config.Model.from_file(os.path.join(os.path.dirname(__file__), "model.json"))
super().__init__(c)
if __name__ == "__main__":
endpoint = AtisParserModelEndpoint()
endpoint.run()
|
allennlp-demo-main
|
api/allennlp_demo/atis_parser/api.py
|