ivykopal committed on
Commit c03e3e3
1 Parent(s): 2103d8d

Upload train.jsonl with huggingface_hub
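The commit message indicates the file was pushed with the huggingface_hub client; a minimal sketch of such an upload is below. The repo_id is a placeholder for illustration and does not appear on this page.

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="train.jsonl",        # local JSONL with the 48 prompt records
    path_in_repo="train.jsonl",           # destination path in the repo
    repo_id="ivykopal/prompts",           # assumed repo id, not taken from this page
    repo_type="dataset",
    commit_message="Upload train.jsonl with huggingface_hub",
)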

Files changed (1)
  1. train.jsonl +48 -0
train.jsonl ADDED
@@ -0,0 +1,48 @@
+ {"dataset": "aeslc", "model": "T5", "name": "aeslc-prompt-t5", "input": "summarize: {{email_body}}", "target": "{{subject_line}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]}
+ {"dataset": "anli", "model": "T5", "name": "anli-prompt-t5", "input": "premise: {{premise}} hypothesis: {{hypothesis}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "billsum", "model": "T5", "name": "billsum-prompt-t5", "input": "summarize: {{text}}", "target": "{{summary}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]}
+ {"dataset": "boolq", "model": "T5", "name": "boolq-prompt-t5", "input": "boolq question: {{question}} passage: {{passage}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "c4", "model": "T5", "name": "c4-prompt-t5", "input": "", "target": "{{text}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]}
+ {"dataset": "cb", "model": "T5", "name": "cb-prompt-t5", "input": "cb premise: {{premise}} hypothesis: {{hypothesis}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "multiclass_f1"]}, "languages": ["en"], "metrics": ["accuracy", "multiclass_f1"]}
+ {"dataset": "cnn_dailymail", "model": "T5", "name": "cnn_dailymail-prompt-t5", "input": "summarize: {{article}}", "target": "{{highlights}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]}
+ {"dataset": "cola", "model": "T5", "name": "cola-prompt-t5", "input": "cola sentence: {{sentence}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["matthews_corrcoef"]}, "languages": ["en"], "metrics": ["matthews_corrcoef"]}
+ {"dataset": "common_gen", "model": "T5", "name": "common_gen-prompt-t5", "input": "generate: {{lambda concepts: \" \".join(concepts)}}", "target": "{{target}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]}
+ {"dataset": "copa", "model": "T5", "name": "copa-prompt-t5", "input": "copa premise: {{premise}} choice1: {{choice1}} choice2: {{choice2}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "cosmos_qa", "model": "T5", "name": "cosmos_qa-prompt-t5", "input": "question: {{question}} context: {{context}} choice0: {{answer0}} choice1: {{answer1}} choice2: {{answer2}} choice3: {{answer3}}", "target": "{{lambda label: str(label)}}", "metadata": {"languages": ["en"], "metrics": ["squad"]}, "languages": ["en"], "metrics": ["squad"]}
+ {"dataset": "cxc", "model": "T5", "name": "cxc-prompt-t5", "input": "sentence1: {{sentence1}} sentence2: {{sentence2}}", "target": "{{lambda score: np.round((score * 5) / 5, decimals=1)}}", "metadata": {"languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"]}, "languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"]}
+ {"dataset": "doc_nli", "model": "T5", "name": "doc_nli-prompt-t5", "input": "premise: {{premise}} hypothesis: {{hypothesis}}", "target": "{{label}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "drop", "model": "T5", "name": "drop-prompt-t5", "input": "question: {{question}} context: {{passage}}", "target": "{{answers_spans[\"spans\"][0]}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]}
+ {"dataset": "gigaword", "model": "T5", "name": "gigaword-prompt-t5", "input": "summarize: {{document}}", "target": "{{summary}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]}
+ {"dataset": "hellaswag", "model": "T5", "name": "hellaswag-prompt-t5", "input": "context: {{ctx}} ending0: {{endings[0]}} ending1: {{endings[1]}} ending2: {{endings[2]}} ending3: {{endings[3]}}", "target": "{{lambda label: str(label)}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "hotpot_qa", "model": "T5", "name": "hotpot_qa-prompt-t5", "input": "question: {{question}} context: {{\"\".join(context[\"sentences\"][0])}}", "target": "{{answer}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]}
+ {"dataset": "mnli", "model": "T5", "name": "mnli-prompt-t5", "input": "mnli premise: {{premise}} hypothesis: {{hypothesis}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "mrpc", "model": "T5", "name": "mrpc-prompt-t5", "input": "mrpc sentence1: {{sentence1}} sentence2: {{sentence2}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "f1_invalid"]}, "languages": ["en"], "metrics": ["accuracy", "f1_invalid"]}
+ {"dataset": "mrqa", "model": "T5", "name": "mrqa-prompt-t5", "input": "question: {{question}} context: {{context}}", "target": "{{answers[0]}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]}
+ {"dataset": "multirc", "model": "T5", "name": "multirc-prompt-t5", "input": "multirc question: {{question}} answer: {{answer}} paragraph: {{paragraph}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["exact_match", "multirc_f1"], "preprocessing": ["remove_markup"]}, "languages": ["en"], "metrics": ["exact_match", "multirc_f1"]}
+ {"dataset": "multi_news", "model": "T5", "name": "multi_news-prompt-t5", "input": "summarize: {{document}}", "target": "{{summary}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]}
+ {"dataset": "multi_nli", "model": "T5", "name": "multi_nli-prompt-t5", "input": "premise: {{premise}} hypothesis: {{hypothesis}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "newsqa", "model": "T5", "name": "newsqa-prompt-t5", "input": "question: {{question}} context: {{context}}", "target": "{{answer[0]}}", "metadata": {"languages": ["en"], "metrics": ["rouge"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["rouge"]}
+ {"dataset": "newsqa", "model": "T5", "name": "newsqa-prompt-t5-without-context", "input": "question: {{question}}", "target": "{{answer[0]}}", "metadata": {"languages": ["en"], "metrics": ["rouge"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["rouge"]}
+ {"dataset": "newsroom", "model": "T5", "name": "newsroom-prompt-t5", "input": "summarize: {{text}}", "target": "{{summary}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]}
+ {"dataset": "nq_open", "model": "T5", "name": "nq_open-prompt-t5", "input": "nq question: {{question}}", "target": "{{answer[0]}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]}
+ {"dataset": "piqa", "model": "T5", "name": "piqa-prompt-t5", "input": "question: {{goal}} choice1: {{sol1}} choice2: {{sol2}}", "target": "{{lambda label: str(label)}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "qnli", "model": "T5", "name": "qnli-prompt-t5", "input": "qnli question: {{question}} sentence: {{sentence}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "qqp", "model": "T5", "name": "qqp-prompt-t5", "input": "qqp question1: {{question1}} question2: {{question2}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "f1_invalid"]}, "languages": ["en"], "metrics": ["accuracy", "f1_invalid"]}
+ {"dataset": "race", "model": "T5", "name": "race-prompt-t5", "input": "question: {{question}} context: {{article}} choice0: {{options[0]}} choice1: {{options[1]}} choice2: {{options[2]}} choice3: {{options[3]}}", "target": "{{lambda answer: str(ord(answer) - ord(\"A\"))}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "record", "model": "T5", "name": "record-prompt-t5", "input": "record query: {{query}} entities: {{\", \".join(entities)}} passage: {{passage}}", "target": "{{answers[0]}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["record_preprocess"]}, "languages": ["en"], "metrics": ["squad"]}
+ {"dataset": "rte", "model": "T5", "name": "rte-prompt-t5", "input": "rte sentence1: {{sentence1}} sentence2: {{sentence2}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "samsum", "model": "T5", "name": "samsum-prompt-t5", "input": "summarize: {{dialogue}}", "target": "{{summary}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]}
+ {"dataset": "search_qa", "model": "T5", "name": "search_qa-prompt-t5", "input": "question: {{question}}", "target": "{{answer}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]}
+ {"dataset": "snli", "model": "T5", "name": "snli-prompt-t5", "input": "premise: {{premise}} hypothesis: {{hypothesis}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "social_i_qa", "model": "T5", "name": "social_i_qa-prompt-t5", "input": "question: {{question}} context: {{context}} || choice0: {{answerA}} || choice1: {{answerB}} || choice2: {{answerC}}", "target": "{{lambda label: str(int(label) - 1)}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "squad", "model": "T5", "name": "squad-prompt-t5", "input": "question: {{question}} context: {{context}}", "target": "{{answers[\"text\"][0]}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]}
+ {"dataset": "squad", "model": "T5", "name": "squad-prompt-trivia-t5", "input": "squad trivia question: {{question}}", "target": "{{answers[\"text\"][0]}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]}
+ {"dataset": "sst2", "model": "T5", "name": "sst2-prompt-t5", "input": "sst2 sentence: {{sentence}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "stsb", "model": "T5", "name": "stsb-prompt-t5", "input": "stsb sentence1: {{sentence1}} sentence2: {{sentence2}}", "target": "{{lambda label: np.round((label * 5) / 5, decimals=1)}}", "metadata": {"languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"]}, "languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"]}
+ {"dataset": "wic", "model": "T5", "name": "wic-prompt-t5", "input": "wic sentence1: {{sentence1}} sentence2: {{sentence2}} word: {{word}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "wiki_auto", "model": "T5", "name": "wiki_auto-prompt-t5", "input": "{{source}}", "target": "{{target}}", "metadata": {"languages": ["en"], "metrics": ["bleu"]}, "languages": ["en"], "metrics": ["bleu"]}
+ {"dataset": "wiki_lingua", "model": "T5", "name": "wiki_lingua-prompt-t5", "input": "{{source_aligned[\"en\"]}}", "target": "{{target_aligned[\"en\"]}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]}
+ {"dataset": "winogrande", "model": "T5", "name": "winogrande-prompt-t5", "input": "sentence: {{sentence}} option0: {{option1}} option1: {{option2}}", "target": "{{lambda answer: str(int(answer) - 1)}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "wnli", "model": "T5", "name": "wnli-prompt-t5", "input": "wnli sentence1: {{sentence1}} sentence2: {{sentence2}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "wsc", "model": "T5", "name": "wsc-prompt-t5", "input": "wsc text: {{text}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": ["wsc_preprocess"]}, "languages": ["en"], "metrics": ["accuracy"]}
+ {"dataset": "xsum", "model": "T5", "name": "xsum-prompt-t5", "input": "summarize: {{document}}", "target": "{{target}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]}