|
{"dataset": "afp", "model": "mT0", "name": "afp-factuality-prompt-mt0", "input": "factuality claim: {{claim}}", "target": "{{lambda label: choices[label]}}", "metadata": {"languages": ["bg", "bn", "ca", "cs", "de", "el", "en", "es", "fi", "fr", "hi", "hr", "hrv", "hu", "id", "ko", "ms", "my", "nl", "pl", "pt", "ro", "sk", "sv", "th"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["bg", "bn", "ca", "cs", "de", "el", "en", "es", "fi", "fr", "hi", "hr", "hrv", "hu", "id", "ko", "ms", "my", "nl", "pl", "pt", "ro", "sk", "sv", "th"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "liar", "model": "mT0", "name": "liar-binary-factuality-prompt-mt0", "input": "factuality claim: {{statement}}", "target": "{{lambda label: choices[0 if label in [0, 1, 5] else 1]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["en"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "liar", "model": "mT0", "name": "liar-multiclass-factuality-prompt-mt0", "input": "factuality claim: {{statement}}", "target": "{{lambda label: choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["en"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "xnli", "model": "mT0", "name": "xnli-prompt-mt0", "input": "{{premise}}\\nQuestion: {{hypothesis}} True, False or Neither?", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh"], "metrics": ["accuracy", "macro_f1"]}, "languages": ["ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "cb", "model": "T5", "name": "cb-prompt-t5", "input": "cb premise: {{premise}} hypothesis: {{hypothesis}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "multiclass_f1"]}, "languages": ["en"], "metrics": ["accuracy", "multiclass_f1"]} |
|
{"dataset": "aeslc", "model": "T5", "name": "aeslc-prompt-t5", "input": "summarize: {{email_body}}", "target": "{{subject_line}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]} |
|
{"dataset": "boolq", "model": "T5", "name": "boolq-prompt-t5", "input": "boolq question: {{question}} passage: {{passage}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "lesa2021", "model": "mT0", "name": "lesa2021-checkworthiness-prompt-mt0", "input": "checkworthiness claim: {{en}}", "target": "{{lambda claim: choices[int(claim)]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["en"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "newsroom", "model": "T5", "name": "newsroom-prompt-t5", "input": "summarize: {{text}}", "target": "{{summary}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]} |
|
{"dataset": "clef2018", "model": "mT0", "name": "clef2018-checkworthiness-prompt-mt0", "input": "checkworthiness claim: {{text}}", "target": "{{lambda label: choices[label]}}", "metadata": {"languages": ["ar", "en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["ar", "en"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "record", "model": "T5", "name": "record-prompt-t5", "input": "record query: {{query}} entities: {{\", \".join(entities)}} passage: {{passage}}", "target": "{{answers[0]}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["record_preprocess"]}, "languages": ["en"], "metrics": ["squad"]} |
|
{"dataset": "anli", "model": "T5", "name": "anli-prompt-t5", "input": "premise: {{premise}} hypothesis: {{hypothesis}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "skquad", "model": "mT0", "name": "skquad-prefix-prompt-mt0", "input": "question: {{question}} context: {{context}}", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "skquad", "model": "mT0", "name": "skquad-instruct1-prompt-mt0", "input": "Answer the question depending on the context. Context: {{context}}; Question: {{question}}; Answer:", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "skquad", "model": "mT0", "name": "skquad-instruct2-prompt-mt0", "input": "What is the answer? Context: {{context}}; Question: {{question}}; Answer:", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "skquad", "model": "mT0", "name": "skquad-instruct3-prompt-mt0", "input": "Given the following passage \"{{context}}\", answer the following question. Note that the answer is present within the text. Question: {{question}}", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "skquad", "model": "mT0", "name": "skquad-instruct4-prompt-mt0", "input": "Refer to the passage below and answer the following question: Passage: {{context}} Question: {{question}}", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "wikiann", "model": "mT0", "name": "wikiann-prompt-mt0", "input": "Sentence: {{lambda tokens: \" \".join(tokens)}}\\nIdentify all named entities in the sentence using PER, LOC, ORG.", "target": "{{lambda spans: \", \".join(spans)}}", "metadata": {"languages": ["en", "ar", "de", "es", "fr", "hi", "it", "ja", "nl", "pt", "ru", "zh", "cs", "sk"], "metrics": ["span_f1"]}, "languages": ["en", "ar", "de", "es", "fr", "hi", "it", "ja", "nl", "pt", "ru", "zh", "cs", "sk"], "metrics": ["span_f1"]} |
|
{"dataset": "billsum", "model": "T5", "name": "billsum-prompt-t5", "input": "summarize: {{text}}", "target": "{{summary}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]} |
|
{"dataset": "mlqa", "model": "mT0", "name": "mlqa-prefix-prompt-mt0", "input": "question: {{question}} context: {{context}}", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "mlqa", "model": "mT0", "name": "mlqa-instruct1-prompt-mt0", "input": "Answer the question depending on the context. Context: {{context}}; Question: {{question}}; Answer:", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "mlqa", "model": "mT0", "name": "mlqa-instruct2-prompt-mt0", "input": "What is the answer? Context: {{context}}; Question: {{question}}; Answer:", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "mlqa", "model": "mT0", "name": "mlqa-instruct3-prompt-mt0", "input": "Given the following passage \"{{context}}\", answer the following question. Note that the answer is present within the text. Question: {{question}}", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "mlqa", "model": "mT0", "name": "mlqa-instruct4-prompt-mt0", "input": "Refer to the passage below and answer the following question: Passage: {{context}} Question: {{question}}", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "drop", "model": "T5", "name": "drop-prompt-t5", "input": "question: {{question}} context: {{passage}}", "target": "{{answers_spans[\"spans\"][0]}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]} |
|
{"dataset": "stsb", "model": "T5", "name": "stsb-prompt-t5", "input": "stsb sentence1: {{sentence1}} sentence2: {{sentence2}}", "target": "{{lambda label: np.round((label * 5) / 5, decimals=1)}}", "metadata": {"languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"]}, "languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"]} |
|
{"dataset": "mrpc", "model": "T5", "name": "mrpc-prompt-t5", "input": "mrpc sentence1: {{sentence1}} sentence2: {{sentence2}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "f1_invalid"]}, "languages": ["en"], "metrics": ["accuracy", "f1_invalid"]} |
|
{"dataset": "gigaword", "model": "T5", "name": "gigaword-prompt-t5", "input": "summarize: {{document}}", "target": "{{summary}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]} |
|
{"dataset": "piqa", "model": "T5", "name": "piqa-prompt-t5", "input": "question: {{goal}} choice1: {{sol1}} choice2: {{sol2}}", "target": "{{lambda label: str(label)}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "cosmos_qa", "model": "T5", "name": "cosmos_qa-prompt-t5", "input": "question: {{question}} context: {{context}} choice0: {{answer0}} choice1: {{answer1}} choice2: {{answer2}} choice3: {{answer3}}", "target": "{{lambda label: str(label)}}", "metadata": {"languages": ["en"], "metrics": ["squad"]}, "languages": ["en"], "metrics": ["squad"]} |
|
{"dataset": "c4", "model": "T5", "name": "c4-prompt-t5", "input": "", "target": "{{text}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]} |
|
{"dataset": "copa", "model": "T5", "name": "copa-prompt-t5", "input": "copa premise: {{premise}} choice1: {{choice1}} choice2: {{choice2}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "mnli", "model": "T5", "name": "mnli-prompt-t5", "input": "mnli premise: {{premise}} hypothesis: {{hypothesis}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "cola", "model": "T5", "name": "cola-prompt-t5", "input": "cola sentence: {{sentence}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["matthews_corrcoef"]}, "languages": ["en"], "metrics": ["matthews_corrcoef"]} |
|
{"dataset": "wnli", "model": "T5", "name": "wnli-prompt-t5", "input": "wnli sentence1: {{sentence1}} sentence2: {{sentence2}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "social_i_qa", "model": "T5", "name": "social_i_qa-prompt-t5", "input": "question: {{question}} context: {{context}} || choice0: {{answerA}} || choice1: {{answerB}} || choice2: {{answerC}}", "target": "{{lambda label: str(int(label) - 1)}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "wiki_auto", "model": "T5", "name": "wiki_auto-prompt-t5", "input": "{{source}}", "target": "{{target}}", "metadata": {"languages": ["en"], "metrics": ["bleu"]}, "languages": ["en"], "metrics": ["bleu"]} |
|
{"dataset": "hellaswag", "model": "T5", "name": "hellaswag-prompt-t5", "input": "context: {{ctx}} ending0: {{endings[0]}} ending1: {{endings[1]}} ending2: {{endings[2]}} ending3: {{endings[3]}}", "target": "{{lambda label: str(label)}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "search_qa", "model": "T5", "name": "search_qa-prompt-t5", "input": "question: {{question}}", "target": "{{answer}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]} |
|
{"dataset": "demagog", "model": "mT0", "name": "demagog-factuality-prompt-mt0", "input": "factuality claim: {{claim}}", "target": "{{lambda label: choices[0 if label in [\"Zav\u00e1d\u011bj\u00edc\u00ed\", \"Nepravda\", \"Zav\u00e1dzaj\u00face\"] else (1 if label in [\"Pravda\"] else 2)]}}", "metadata": {"languages": ["cs", "sk"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["cs", "sk"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "claimbuster", "model": "mT0", "name": "claimbuster-checkworthiness-prompt-mt0", "input": "checkworthiness claim: {{text}}", "target": "{{lambda label: choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["en"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "wikipedia", "model": "mT0", "name": "wikipedia-prompt-mt0", "input": "", "target": "{{text}}", "metadata": {"languages": ["en", "ar", "de", "es", "fr", "hi", "it", "ja", "nl", "pt", "ru", "zh", "cs", "sk"], "metrics": ["span_f1"]}, "languages": ["en", "ar", "de", "es", "fr", "hi", "it", "ja", "nl", "pt", "ru", "zh", "cs", "sk"], "metrics": ["span_f1"]} |
|
{"dataset": "wsc", "model": "T5", "name": "wsc-prompt-t5", "input": "wsc text: {{text}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": ["wsc_preprocess"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "multi_news", "model": "T5", "name": "multi_news-prompt-t5", "input": "summarize: {{document}}", "target": "{{summary}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]} |
|
{"dataset": "clef2021", "model": "mT0", "name": "clef2021-checkworthiness-prompt-mt0", "input": "checkworthiness claim: {{tweet_text}}", "target": "{{lambda check_worthiness: choices[check_worthiness]}}", "metadata": {"languages": ["ar", "bg", "nl", "en", "es", "tr"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["ar", "bg", "nl", "en", "es", "tr"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "pawsx", "model": "mT0", "name": "paws-x-prompt-mt0", "input": "Sentence1: {{sentence1}}\\nSentence2: {{sentence2}}\\nQuestion: Do Sentence 1 and Sentence 2 express the same meaning? Yes or No?", "target": "{{lambda label: choices[label]}}", "metadata": {"languages": ["en", "fr", "es", "de", "zh", "ja", "ko"], "metrics": ["accuracy", "macro_f1"]}, "languages": ["en", "fr", "es", "de", "zh", "ja", "ko"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "samsum", "model": "T5", "name": "samsum-prompt-t5", "input": "summarize: {{dialogue}}", "target": "{{summary}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]} |
|
{"dataset": "winogrande", "model": "T5", "name": "winogrande-prompt-t5", "input": "sentence: {{sentence}} option0: {{option1}} option1: {{option2}}", "target": "{{lambda answer: str(int(answer) - 1)}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "hotpot_qa", "model": "T5", "name": "hotpot_qa-prompt-t5", "input": "question: {{question}} context: {{\"\".join(context[\"sentences\"][0])}}", "target": "{{answer}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]} |
|
{"dataset": "wiki_lingua", "model": "T5", "name": "wiki_lingua-prompt-t5", "input": "{{source_aligned[\"en\"]}}", "target": "{{target_aligned[\"en\"]}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]} |
|
{"dataset": "multi_nli", "model": "T5", "name": "multi_nli-prompt-t5", "input": "premise: {{premise}} hypothesis: {{hypothesis}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "squad", "model": "T5", "name": "squad-prompt-t5", "input": "question: {{question}} context: {{context}}", "target": "{{answers[\"text\"][0]}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]} |
|
{"dataset": "squad", "model": "T5", "name": "squad-prompt-trivia-t5", "input": "squad trivia question: {{question}}", "target": "{{answers[\"text\"][0]}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]} |
|
{"dataset": "multirc", "model": "T5", "name": "multirc-prompt-t5", "input": "multirc question: {{question}} answer: {{answer}} paragraph: {{paragraph}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["exact_match", "multirc_f1"], "preprocessing": ["remove_markup"]}, "languages": ["en"], "metrics": ["exact_match", "multirc_f1"]} |
|
{"dataset": "rte", "model": "T5", "name": "rte-prompt-t5", "input": "rte sentence1: {{sentence1}} sentence2: {{sentence2}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "mrqa", "model": "T5", "name": "mrqa-prompt-t5", "input": "question: {{question}} context: {{context}}", "target": "{{answers[0]}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]} |
|
{"dataset": "cxc", "model": "T5", "name": "cxc-prompt-t5", "input": "sentence1: {{sentence1}} sentence2: {{sentence2}}", "target": "{{lambda score: np.round((score * 5) / 5, decimals=1)}}", "metadata": {"languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"]}, "languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"]} |
|
{"dataset": "doc_nli", "model": "T5", "name": "doc_nli-prompt-t5", "input": "premise: {{premise}} hypothesis: {{hypothesis}}", "target": "{{label}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "snli", "model": "T5", "name": "snli-prompt-t5", "input": "premise: {{premise}} hypothesis: {{hypothesis}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "newsqa", "model": "T5", "name": "newsqa-prompt-t5", "input": "question: {{question}} context: {{context}}", "target": "{{answer[0]}}", "metadata": {"languages": ["en"], "metrics": ["rouge"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["rouge"]} |
|
{"dataset": "newsqa", "model": "T5", "name": "newsqa-prompt-t5-without-context", "input": "question: {{question}}", "target": "{{answer[0]}}", "metadata": {"languages": ["en"], "metrics": ["rouge"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["rouge"]} |
|
{"dataset": "xfact", "model": "mT0", "name": "xfact-factuality-prompt-mt0", "input": "factuality claim: {{claim}}", "target": "{{lambda label: choices[{\"false\": 0, \"partly true/misleading\": 0, \"mostly false\": 0, \"true\": 1, \"mostly true\": 1, \"half true\": 1, \"complicated/hard to categorise\": 2, \"other\": 2}[label]]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["en"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "xfact", "model": "mT0", "name": "xfact-factuality-evidence-prompt-mt0", "input": "factuality claim: {{claim}} evidence1: {{evidence_1}} evidence2: {{evidence_2}} evidence3: {{evidence_3}} evidence4: {{evidence_4}} evidence5: {{evidence_5}}", "target": "{{lambda label: choices[{\"false\": 0, \"partly true/misleading\": 0, \"mostly false\": 0, \"true\": 1, \"mostly true\": 1, \"half true\": 1, \"complicated/hard to categorise\": 2, \"other\": 2}[label]]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters", "pad_punctuation"]}, "languages": ["en"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "common_gen", "model": "T5", "name": "common_gen-prompt-t5", "input": "generate: {{lambda concepts: \" \".join(concepts)}}", "target": "{{target}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]} |
|
{"dataset": "cssquad", "model": "mT0", "name": "cssquad-prefix-prompt-mt0", "input": "question: {{question}} context: {{context}}", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "cssquad", "model": "mT0", "name": "cssquad-instruct1-prompt-mt0", "input": "Answer the question depending on the context. Context: {{context}}; Question: {{question}}; Answer:", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "cssquad", "model": "mT0", "name": "cssquad-instruct2-prompt-mt0", "input": "What is the answer? Context: {{context}}; Question: {{question}}; Answer:", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "cssquad", "model": "mT0", "name": "cssquad-instruct3-prompt-mt0", "input": "Given the following passage \"{{context}}\", answer the following question. Note that the answer is present within the text. Question: {{question}}", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "cssquad", "model": "mT0", "name": "cssquad-instruct4-prompt-mt0", "input": "Refer to the passage below and answer the following question: Passage: {{context}} Question: {{question}}", "target": "{{lambda answers: answers[\"text\"][0]}}", "metadata": {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]}, "languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"]} |
|
{"dataset": "qqp", "model": "T5", "name": "qqp-prompt-t5", "input": "qqp question1: {{question1}} question2: {{question2}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "f1_invalid"]}, "languages": ["en"], "metrics": ["accuracy", "f1_invalid"]} |
|
{"dataset": "clef2023", "model": "mT0", "name": "clef2023-checkworthiness-prompt-mt0", "input": "checkworthiness claim: {{Text}}", "target": "{{lambda class_label: choices[0 if class_label.lower() == \"no\" else 1]}}", "metadata": {"languages": ["ar", "en", "es"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["ar", "en", "es"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "hover", "model": "mT0", "name": "hover-factuality-prompt-mt0", "input": "factuality claim: {{claim}}", "target": "{{lambda label: choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["en"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "fakecovid", "model": "mT0", "name": "fakecovid-factuality-prompt-mt0", "input": "factuality claim: {{source_title}}", "target": "{{lambda class: choices[class]}}", "metadata": {"languages": ["es", "en", "fr", "pt", "hi", "de", "it", "zh", "ar", "nl", "ko", "pl", "ru", "ja", "sk"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["es", "en", "fr", "pt", "hi", "de", "it", "zh", "ar", "nl", "ko", "pl", "ru", "ja", "sk"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "ctkfacts", "model": "mT0", "name": "ctkfacts-factuality-prompt-mt0", "input": "factuality claim: {{claim}} evidence: {{evidence}}", "target": "{{lambda label: choices[label]}}", "metadata": {"languages": ["cs"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["cs"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "clef2022", "model": "mT0", "name": "clef2022-checkworthiness-prompt-mt0", "input": "checkworthiness claim: {{tweet_text}}", "target": "{{lambda class_label: choices[class_label]}}", "metadata": {"languages": ["ar", "bg", "nl", "en", "es", "tr"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["ar", "bg", "nl", "en", "es", "tr"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "nq_open", "model": "T5", "name": "nq_open-prompt-t5", "input": "nq question: {{question}}", "target": "{{answer[0]}}", "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["squad"]} |
|
{"dataset": "xsum", "model": "T5", "name": "xsum-prompt-t5", "input": "summarize: {{document}}", "target": "{{target}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]} |
|
{"dataset": "csfever", "model": "mT0", "name": "csfever-factuality-prompt-mt0", "input": "factuality claim: {{claim}} evidence: {{evidence}}", "target": "{{lambda label: choices[label]}}", "metadata": {"languages": ["cs"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["cs"], "metrics": ["accuracy", "macro_f1"]} |
|
{"dataset": "cnn_dailymail", "model": "T5", "name": "cnn_dailymail-prompt-t5", "input": "summarize: {{article}}", "target": "{{highlights}}", "metadata": {"languages": ["en"], "metrics": ["rouge"]}, "languages": ["en"], "metrics": ["rouge"]} |
|
{"dataset": "qnli", "model": "T5", "name": "qnli-prompt-t5", "input": "qnli question: {{question}} sentence: {{sentence}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "wic", "model": "T5", "name": "wic-prompt-t5", "input": "wic sentence1: {{sentence1}} sentence2: {{sentence2}} word: {{word}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "race", "model": "T5", "name": "race-prompt-t5", "input": "question: {{question}} context: {{article}} choice0: {{options[0]}} choice1: {{options[1]}} choice2: {{options[2]}} choice3: {{options[3]}}", "target": "{{lambda answer: str(ord(answer) - ord(\"A\"))}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "sst2", "model": "T5", "name": "sst2-prompt-t5", "input": "sst2 sentence: {{sentence}}", "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": ["pad_punctuation"]}, "languages": ["en"], "metrics": ["accuracy"]} |
|
{"dataset": "fever", "model": "mT0", "name": "fever-factuality-prompt-mt0", "input": "factuality claim: {{claim}}", "target": "{{lambda label: choices[{\"SUPPORTS\": 0, \"REFUTES\": 1, \"NOT ENOUGH INFO\": 2}[label]]}}", "metadata": {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]}, "languages": ["en"], "metrics": ["accuracy", "macro_f1"]} |
|
|