tasktemplates/templates.json
{
"train": [
{
"dataset": "aeslc",
"model": "T5",
"name": "aeslc-prompt-t5",
"input": "summarize: {{email_body}}",
"target": "{{subject_line}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
{
"dataset": "anli",
"model": "T5",
"name": "anli-prompt-t5",
"input": "premise: {{premise}} hypothesis: {{hypothesis}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "billsum",
"model": "T5",
"name": "billsum-prompt-t5",
"input": "summarize: {{text}}",
"target": "{{summary}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
{
"dataset": "boolq",
"model": "T5",
"name": "boolq-prompt-t5",
"input": "boolq question: {{question}} passage: {{passage}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "c4",
"model": "T5",
"name": "c4-prompt-t5",
"input": "",
"target": "{{text}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
{
"dataset": "cb",
"model": "T5",
"name": "cb-prompt-t5",
"input": "cb premise: {{premise}} hypothesis: {{hypothesis}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy",
"multiclass_f1"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy",
"multiclass_f1"
]
},
{
"dataset": "cnn_dailymail",
"model": "T5",
"name": "cnn_dailymail-prompt-t5",
"input": "summarize: {{article}}",
"target": "{{highlights}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
{
"dataset": "cola",
"model": "T5",
"name": "cola-prompt-t5",
"input": "cola sentence: {{sentence}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"matthews_corrcoef"
]
},
"languages": [
"en"
],
"metrics": [
"matthews_corrcoef"
]
},
{
"dataset": "common_gen",
"model": "T5",
"name": "common_gen-prompt-t5",
"input": "generate: {{lambda concepts: \" \".join(concepts)}}",
"target": "{{target}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
{
"dataset": "copa",
"model": "T5",
"name": "copa-prompt-t5",
"input": "copa premise: {{premise}} choice1: {{choice1}} choice2: {{choice2}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "cosmos_qa",
"model": "T5",
"name": "cosmos_qa-prompt-t5",
"input": "question: {{question}} context: {{context}} choice0: {{answer0}} choice1: {{answer1}} choice2: {{answer2}} choice3: {{answer3}}",
"target": "{{lambda label: str(label)}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"squad"
]
},
"languages": [
"en"
],
"metrics": [
"squad"
]
},
{
"dataset": "cxc",
"model": "T5",
"name": "cxc-prompt-t5",
"input": "sentence1: {{sentence1}} sentence2: {{sentence2}}",
"target": "{{lambda score: np.round((score * 5) / 5, decimals=1)}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"pearson_corrcoef",
"spearman_corrcoef"
]
},
"languages": [
"en"
],
"metrics": [
"pearson_corrcoef",
"spearman_corrcoef"
]
},
{
"dataset": "doc_nli",
"model": "T5",
"name": "doc_nli-prompt-t5",
"input": "premise: {{premise}} hypothesis: {{hypothesis}}",
"target": "{{label}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "drop",
"model": "T5",
"name": "drop-prompt-t5",
"input": "question: {{question}} context: {{passage}}",
"target": "{{answers_spans[\"spans\"][0]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"squad"
],
"preprocessing": [
"pad_punctuation"
]
},
"languages": [
"en"
],
"metrics": [
"squad"
]
},
{
"dataset": "gigaword",
"model": "T5",
"name": "gigaword-prompt-t5",
"input": "summarize: {{document}}",
"target": "{{summary}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
{
"dataset": "hellaswag",
"model": "T5",
"name": "hellaswag-prompt-t5",
"input": "context: {{ctx}} ending0: {{endings[0]}} ending1: {{endings[1]}} ending2: {{endings[2]}} ending3: {{endings[3]}}",
"target": "{{lambda label: str(label)}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "hotpot_qa",
"model": "T5",
"name": "hotpot_qa-prompt-t5",
"input": "question: {{question}} context: {{\"\".join(context[\"sentences\"][0])}}",
"target": "{{answer}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"squad"
],
"preprocessing": [
"pad_punctuation"
]
},
"languages": [
"en"
],
"metrics": [
"squad"
]
},
{
"dataset": "mnli",
"model": "T5",
"name": "mnli-prompt-t5",
"input": "mnli premise: {{premise}} hypothesis: {{hypothesis}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "mrpc",
"model": "T5",
"name": "mrpc-prompt-t5",
"input": "mrpc sentence1: {{sentence1}} sentence2: {{sentence2}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy",
"f1_invalid"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy",
"f1_invalid"
]
},
{
"dataset": "mrqa",
"model": "T5",
"name": "mrqa-prompt-t5",
"input": "question: {{question}} context: {{context}}",
"target": "{{answers[0]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"squad"
],
"preprocessing": [
"pad_punctuation"
]
},
"languages": [
"en"
],
"metrics": [
"squad"
]
},
{
"dataset": "multirc",
"model": "T5",
"name": "multirc-prompt-t5",
"input": "multirc question: {{question}} answer: {{answer}} paragraph: {{paragraph}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"exact_match",
"multirc_f1"
],
"preprocessing": [
"remove_markup"
]
},
"languages": [
"en"
],
"metrics": [
"exact_match",
"multirc_f1"
]
},
{
"dataset": "multi_news",
"model": "T5",
"name": "multi_news-prompt-t5",
"input": "summarize: {{document}}",
"target": "{{summary}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
{
"dataset": "multi_nli",
"model": "T5",
"name": "multi_nli-prompt-t5",
"input": "premise: {{premise}} hypothesis: {{hypothesis}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "newsqa",
"model": "T5",
"name": "newsqa-prompt-t5",
"input": "question: {{question}} context: {{context}}",
"target": "{{answer[0]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"rouge"
],
"preprocessing": [
"pad_punctuation"
]
},
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
{
"dataset": "newsqa",
"model": "T5",
"name": "newsqa-prompt-t5-without-context",
"input": "question: {{question}}",
"target": "{{answer[0]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"rouge"
],
"preprocessing": [
"pad_punctuation"
]
},
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
{
"dataset": "newsroom",
"model": "T5",
"name": "newsroom-prompt-t5",
"input": "summarize: {{text}}",
"target": "{{summary}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
{
"dataset": "nq_open",
"model": "T5",
"name": "nq_open-prompt-t5",
"input": "nq question: {{question}}",
"target": "{{answer[0]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"squad"
],
"preprocessing": [
"pad_punctuation"
]
},
"languages": [
"en"
],
"metrics": [
"squad"
]
},
{
"dataset": "piqa",
"model": "T5",
"name": "piqa-prompt-t5",
"input": "question: {{goal}} choice1: {{sol1}} choice2: {{sol2}}",
"target": "{{lambda label: str(label)}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "qnli",
"model": "T5",
"name": "qnli-prompt-t5",
"input": "qnli question: {{question}} sentence: {{sentence}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "qqp",
"model": "T5",
"name": "qqp-prompt-t5",
"input": "qqp question1: {{question1}} question2: {{question2}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy",
"f1_invalid"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy",
"f1_invalid"
]
},
{
"dataset": "race",
"model": "T5",
"name": "race-prompt-t5",
"input": "question: {{question}} context: {{article}} choice0: {{options[0]}} choice1: {{options[1]}} choice2: {{options[2]}} choice3: {{options[3]}}",
"target": "{{lambda answer: str(ord(answer) - ord(\"A\"))}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "record",
"model": "T5",
"name": "record-prompt-t5",
"input": "record query: {{query}} entities: {{\", \".join(entities)}} passage: {{passage}}",
"target": "{{answers[0]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"squad"
],
"preprocessing": [
"record_preprocess"
]
},
"languages": [
"en"
],
"metrics": [
"squad"
]
},
{
"dataset": "rte",
"model": "T5",
"name": "rte-prompt-t5",
"input": "rte sentence1: {{sentence1}} sentence2: {{sentence2}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "samsum",
"model": "T5",
"name": "samsum-prompt-t5",
"input": "summarize: {{dialogue}}",
"target": "{{summary}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
{
"dataset": "search_qa",
"model": "T5",
"name": "search_qa-prompt-t5",
"input": "question: {{question}}",
"target": "{{answer}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"squad"
],
"preprocessing": [
"pad_punctuation"
]
},
"languages": [
"en"
],
"metrics": [
"squad"
]
},
{
"dataset": "snli",
"model": "T5",
"name": "snli-prompt-t5",
"input": "premise: {{premise}} hypothesis: {{hypothesis}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "social_i_qa",
"model": "T5",
"name": "social_i_qa-prompt-t5",
"input": "question: {{question}} context: {{context}} || choice0: {{answerA}} || choice1: {{answerB}} || choice2: {{answerC}}",
"target": "{{lambda label: str(int(label) - 1)}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "squad",
"model": "T5",
"name": "squad-prompt-t5",
"input": "question: {{question}} context: {{context}}",
"target": "{{answers[\"text\"][0]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"squad"
],
"preprocessing": [
"pad_punctuation"
]
},
"languages": [
"en"
],
"metrics": [
"squad"
]
},
{
"dataset": "squad",
"model": "T5",
"name": "squad-prompt-trivia-t5",
"input": "squad trivia question: {{question}}",
"target": "{{answers[\"text\"][0]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"squad"
],
"preprocessing": [
"pad_punctuation"
]
},
"languages": [
"en"
],
"metrics": [
"squad"
]
},
{
"dataset": "sst2",
"model": "T5",
"name": "sst2-prompt-t5",
"input": "sst2 sentence: {{sentence}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
],
"preprocessing": [
"pad_punctuation"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "stsb",
"model": "T5",
"name": "stsb-prompt-t5",
"input": "stsb sentence1: {{sentence1}} sentence2: {{sentence2}}",
"target": "{{lambda label: np.round((label * 5) / 5, decimals=1)}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"pearson_corrcoef",
"spearman_corrcoef"
]
},
"languages": [
"en"
],
"metrics": [
"pearson_corrcoef",
"spearman_corrcoef"
]
},
{
"dataset": "wic",
"model": "T5",
"name": "wic-prompt-t5",
"input": "wic sentence1: {{sentence1}} sentence2: {{sentence2}} word: {{word}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "wiki_auto",
"model": "T5",
"name": "wiki_auto-prompt-t5",
"input": "{{source}}",
"target": "{{target}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"bleu"
]
},
"languages": [
"en"
],
"metrics": [
"bleu"
]
},
{
"dataset": "wiki_lingua",
"model": "T5",
"name": "wiki_lingua-prompt-t5",
"input": "{{source_aligned[\"en\"]}}",
"target": "{{target_aligned[\"en\"]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
{
"dataset": "winogrande",
"model": "T5",
"name": "winogrande-prompt-t5",
"input": "sentence: {{sentence}} option0: {{option1}} option1: {{option2}}",
"target": "{{lambda answer: str(int(answer) - 1)}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "wnli",
"model": "T5",
"name": "wnli-prompt-t5",
"input": "wnli sentence1: {{sentence1}} sentence2: {{sentence2}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "wsc",
"model": "T5",
"name": "wsc-prompt-t5",
"input": "wsc text: {{text}}",
"target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"accuracy"
],
"preprocessing": [
"wsc_preprocess"
]
},
"languages": [
"en"
],
"metrics": [
"accuracy"
]
},
{
"dataset": "xsum",
"model": "T5",
"name": "xsum-prompt-t5",
"input": "summarize: {{document}}",
"target": "{{target}}",
"metadata": {
"languages": [
"en"
],
"metrics": [
"rouge"
]
},
"languages": [
"en"
],
"metrics": [
"rouge"
]
}
]
}
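
Each "input"/"target" string above mixes literal prompt text with {{...}} spans that are either dataset field names (e.g. {{question}}), Python expressions over those fields (e.g. {{answers["text"][0]}}), or one-argument lambdas applied to the field named by their parameter (e.g. {{lambda label: "<unk>" if label == -1 else choices[label]}}, where choices is the task's label-string list). The sketch below is a minimal, illustrative consumer of this file, not code shipped with it: the render helper, its argument extraction, and the boolq example values are assumptions, and it uses eval, so it is only suitable for trusted templates like these.

import json
import re

import numpy as np  # several "{{lambda ...}}" targets reference np

PLACEHOLDER = re.compile(r"\{\{(.+?)\}\}")

def render(template: str, example: dict, choices=None) -> str:
    """Substitute {{field}} / {{expr}} spans and apply {{lambda ...}} spans.

    Hypothetical helper: the real consumer of templates.json is not shown
    in this repository, so this only demonstrates the template syntax.
    """
    def repl(match):
        expr = match.group(1)
        env = {"np": np, "choices": choices, **example}
        if expr.startswith("lambda"):
            # e.g. "lambda label: choices[label]" is applied to the
            # example field named by the lambda's sole parameter.
            fn = eval(expr, env)                    # trusted templates only
            arg = expr.split(":")[0].split()[1]      # parameter name
            return str(fn(example[arg]))
        # Plain field name or Python expression over the example's fields.
        return str(eval(expr, env))
    return PLACEHOLDER.sub(repl, template)

with open("templates.json") as f:
    templates = json.load(f)["train"]

# Illustrative boolq example; field values and the choices list are assumed.
boolq = next(t for t in templates if t["dataset"] == "boolq")
example = {"question": "is the sky blue", "passage": "The sky is blue.", "label": 1}
print(render(boolq["input"], example))                      # prompt text
print(render(boolq["target"], example, choices=["False", "True"]))  # "True"

Under these assumptions, the -1 branch in the label lambdas maps unlabeled test examples to the literal string "<unk>", while non-negative labels index into the per-task choices list; the numeric lambdas (cxc, stsb) round scores to the nearest 0.2 before serialization, matching the usual T5 STS-B preprocessing.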