dataset | model | name | input | target | metadata | languages | metrics |
---|---|---|---|---|---|---|---|
afp | mT0 | afp-factuality-prompt-mt0 | factuality claim: {{claim}} | {{lambda label: choices[label]}} | {"languages": ["bg", "bn", "ca", "cs", "de", "el", "en", "es", "fi", "fr", "hi", "hr", "hrv", "hu", "id", "ko", "ms", "my", "nl", "pl", "pt", "ro", "sk", "sv", "th"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | bg, bn, ca, cs, de, el, en, es, fi, fr, hi, hr, hrv, hu, id, ko, ms, my, nl, pl, pt, ro, sk, sv, th | accuracy, macro_f1 |
liar | mT0 | liar-binary-factuality-prompt-mt0 | factuality claim: {{statement}} | {{lambda label: choices[0 if label in [0, 1, 5] else 1]}} | {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | en | accuracy, macro_f1 |
liar | mT0 | liar-multiclass-factuality-prompt-mt0 | factuality claim: {{statement}} | {{lambda label: choices[label]}} | {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | en | accuracy, macro_f1 |
xnli | mT0 | xnli-prompt-mt0 | {{premise}}\nQuestion: {{hypothesis}} True, False or Neither? | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh"], "metrics": ["accuracy", "macro_f1"], "preprocessing": null} | ar, bg, de, el, en, es, fr, hi, ru, sw, th, tr, ur, vi, zh | accuracy, macro_f1 |
cb | T5 | cb-prompt-t5 | cb premise: {{premise}} hypothesis: {{hypothesis}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy", "multiclass_f1"], "preprocessing": null} | en | accuracy, multiclass_f1 |
aeslc | T5 | aeslc-prompt-t5 | summarize: {{email_body}} | {{subject_line}} | {"languages": ["en"], "metrics": ["rouge"], "preprocessing": null} | en | rouge |
boolq | T5 | boolq-prompt-t5 | boolq question: {{question}} passage: {{passage}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
lesa2021 | mT0 | lesa2021-checkworthiness-prompt-mt0 | checkworthiness claim: {{en}} | {{lambda claim: choices[int(claim)]}} | {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | en | accuracy, macro_f1 |
newsroom | T5 | newsroom-prompt-t5 | summarize: {{text}} | {{summary}} | {"languages": ["en"], "metrics": ["rouge"], "preprocessing": null} | en | rouge |
clef2018 | mT0 | clef2018-checkworthiness-prompt-mt0 | checkworthiness claim: {{text}} | {{lambda label: choices[label]}} | {"languages": ["ar", "en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | ar, en | accuracy, macro_f1 |
record | T5 | record-prompt-t5 | record query: {{query}} entities: {{", ".join(entities)}} passage: {{passage}} | {{answers[0]}} | {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["record_preprocess"]} | en | squad |
anli | T5 | anli-prompt-t5 | premise: {{premise}} hypothesis: {{hypothesis}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
skquad | mT0 | skquad-prefix-prompt-mt0 | question: {{question}} context: {{context}} | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]} | en, ar, de, es, hi, vi, zh | squad |
skquad | mT0 | skquad-instruct1-prompt-mt0 | Answer the question depending on the context. Context: {{context}}; Question: {{question}}; Answer: | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": null} | en, ar, de, es, hi, vi, zh | squad |
skquad | mT0 | skquad-instruct2-prompt-mt0 | What is the answer? Context: {{context}}; Question: {{question}}; Answer: | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": null} | en, ar, de, es, hi, vi, zh | squad |
skquad | mT0 | skquad-instruct3-prompt-mt0 | Given the following passage "{{context}}", answer the following question. Note that the answer is present within the text. Question: {{question}} | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": null} | en, ar, de, es, hi, vi, zh | squad |
skquad | mT0 | skquad-instruct4-prompt-mt0 | Refer to the passage below and answer the following question: Passage: {{context}} Question: {{question}} | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": null} | en, ar, de, es, hi, vi, zh | squad |
wikiann | mT0 | wikiann-prompt-mt0 | Sentence: {{lambda tokens: " ".join(tokens)}}\nIdentify all named entities in the sentence using PER, LOC, ORG. | {{lambda spans: ", ".join(spans)}} | {"languages": ["en", "ar", "de", "es", "fr", "hi", "it", "ja", "nl", "pt", "ru", "zh", "cs", "sk"], "metrics": ["span_f1"], "preprocessing": null} | en, ar, de, es, fr, hi, it, ja, nl, pt, ru, zh, cs, sk | span_f1 |
billsum | T5 | billsum-prompt-t5 | summarize: {{text}} | {{summary}} | {"languages": ["en"], "metrics": ["rouge"], "preprocessing": null} | en | rouge |
mlqa | mT0 | mlqa-prefix-prompt-mt0 | question: {{question}} context: {{context}} | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]} | en, ar, de, es, hi, vi, zh | squad |
mlqa | mT0 | mlqa-instruct1-prompt-mt0 | Answer the question depending on the context. Context: {{context}}; Question: {{question}}; Answer: | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": null} | en, ar, de, es, hi, vi, zh | squad |
mlqa | mT0 | mlqa-instruct2-prompt-mt0 | What is the answer? Context: {{context}}; Question: {{question}}; Answer: | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": null} | en, ar, de, es, hi, vi, zh | squad |
mlqa | mT0 | mlqa-instruct3-prompt-mt0 | Given the following passage "{{context}}", answer the following question. Note that the answer is present within the text. Question: {{question}} | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": null} | en, ar, de, es, hi, vi, zh | squad |
mlqa | mT0 | mlqa-instruct4-prompt-mt0 | Refer to the passage below and answer the following question: Passage: {{context}} Question: {{question}} | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": null} | en, ar, de, es, hi, vi, zh | squad |
drop | T5 | drop-prompt-t5 | question: {{question}} context: {{passage}} | {{answers_spans["spans"][0]}} | {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]} | en | squad |
stsb | T5 | stsb-prompt-t5 | stsb sentence1: {{sentence1}} sentence2: {{sentence2}} | {{lambda label: np.round((label * 5) / 5, decimals=1)}} | {"languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"], "preprocessing": null} | en | pearson_corrcoef, spearman_corrcoef |
mrpc | T5 | mrpc-prompt-t5 | mrpc sentence1: {{sentence1}} sentence2: {{sentence2}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy", "f1_invalid"], "preprocessing": null} | en | accuracy, f1_invalid |
gigaword | T5 | gigaword-prompt-t5 | summarize: {{document}} | {{summary}} | {"languages": ["en"], "metrics": ["rouge"], "preprocessing": null} | en | rouge |
piqa | T5 | piqa-prompt-t5 | question: {{goal}} choice1: {{sol1}} choice2: {{sol2}} | {{lambda label: str(label)}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
cosmos_qa | T5 | cosmos_qa-prompt-t5 | question: {{question}} context: {{context}} choice0: {{answer0}} choice1: {{answer1}} choice2: {{answer2}} choice3: {{answer3}} | {{lambda label: str(label)}} | {"languages": ["en"], "metrics": ["squad"], "preprocessing": null} | en | squad |
c4 | T5 | c4-prompt-t5 | {{text}} | | {"languages": ["en"], "metrics": ["rouge"], "preprocessing": null} | en | rouge |
copa | T5 | copa-prompt-t5 | copa premise: {{premise}} choice1: {{choice1}} choice2: {{choice2}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
mnli | T5 | mnli-prompt-t5 | mnli premise: {{premise}} hypothesis: {{hypothesis}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
cola | T5 | cola-prompt-t5 | cola sentence: {{sentence}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["matthews_corrcoef"], "preprocessing": null} | en | matthews_corrcoef |
wnli | T5 | wnli-prompt-t5 | wnli sentence1: {{sentence1}} sentence2: {{sentence2}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
social_i_qa | T5 | social_i_qa-prompt-t5 | question: {{question}} context: {{context}} \|\| choice0: {{answerA}} \|\| choice1: {{answerB}} \|\| choice2: {{answerC}} | {{lambda label: str(int(label) - 1)}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
wiki_auto | T5 | wiki_auto-prompt-t5 | {{source}} | {{target}} | {"languages": ["en"], "metrics": ["bleu"], "preprocessing": null} | en | bleu |
hellaswag | T5 | hellaswag-prompt-t5 | context: {{ctx}} ending0: {{endings[0]}} ending1: {{endings[1]}} ending2: {{endings[2]}} ending3: {{endings[3]}} | {{lambda label: str(label)}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
search_qa | T5 | search_qa-prompt-t5 | question: {{question}} | {{answer}} | {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]} | en | squad |
demagog | mT0 | demagog-factuality-prompt-mt0 | factuality claim: {{claim}} | {{lambda label: choices[0 if label in ["Zavádějící", "Nepravda", "Zavádzajúce"] else (1 if label in ["Pravda"] else 2)]}} | {"languages": ["cs", "sk"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | cs, sk | accuracy, macro_f1 |
claimbuster | mT0 | claimbuster-checkworthiness-prompt-mt0 | checkworthiness claim: {{text}} | {{lambda label: choices[label]}} | {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | en | accuracy, macro_f1 |
wikipedia | mT0 | wikipedia-prompt-mt0 | {{text}} | | {"languages": ["en", "ar", "de", "es", "fr", "hi", "it", "ja", "nl", "pt", "ru", "zh", "cs", "sk"], "metrics": ["span_f1"], "preprocessing": null} | en, ar, de, es, fr, hi, it, ja, nl, pt, ru, zh, cs, sk | span_f1 |
wsc | T5 | wsc-prompt-t5 | wsc text: {{text}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": ["wsc_preprocess"]} | en | accuracy |
multi_news | T5 | multi_news-prompt-t5 | summarize: {{document}} | {{summary}} | {"languages": ["en"], "metrics": ["rouge"], "preprocessing": null} | en | rouge |
clef2021 | mT0 | clef2021-checkworthiness-prompt-mt0 | checkworthiness claim: {{tweet_text}} | {{lambda check_worthiness: choices[check_worthiness]}} | {"languages": ["ar", "bg", "nl", "en", "es", "tr"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | ar, bg, nl, en, es, tr | accuracy, macro_f1 |
pawsx | mT0 | paws-x-prompt-mt0 | Sentence1: {{sentence1}}\nSentence2: {{sentence2}}\nQuestion: Do Sentence 1 and Sentence 2 express the same meaning? Yes or No? | {{lambda label: choices[label]}} | {"languages": ["en", "fr", "es", "de", "zh", "ja", "ko"], "metrics": ["accuracy", "macro_f1"], "preprocessing": null} | en, fr, es, de, zh, ja, ko | accuracy, macro_f1 |
samsum | T5 | samsum-prompt-t5 | summarize: {{dialogue}} | {{summary}} | {"languages": ["en"], "metrics": ["rouge"], "preprocessing": null} | en | rouge |
winogrande | T5 | winogrande-prompt-t5 | sentence: {{sentence}} option0: {{option1}} option1: {{option2}} | {{lambda answer: str(int(answer) - 1)}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
hotpot_qa | T5 | hotpot_qa-prompt-t5 | question: {{question}} context: {{"".join(context["sentences"][0])}} | {{answer}} | {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]} | en | squad |
wiki_lingua | T5 | wiki_lingua-prompt-t5 | {{source_aligned["en"]}} | {{target_aligned["en"]}} | {"languages": ["en"], "metrics": ["rouge"], "preprocessing": null} | en | rouge |
multi_nli | T5 | multi_nli-prompt-t5 | premise: {{premise}} hypothesis: {{hypothesis}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
squad | T5 | squad-prompt-t5 | question: {{question}} context: {{context}} | {{answers["text"][0]}} | {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]} | en | squad |
squad | T5 | squad-prompt-trivia-t5 | squad trivia question: {{question}} | {{answers["text"][0]}} | {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]} | en | squad |
multirc | T5 | multirc-prompt-t5 | multirc question: {{question}} answer: {{answer}} paragraph: {{paragraph}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["exact_match", "multirc_f1"], "preprocessing": ["remove_markup"]} | en | exact_match, multirc_f1 |
rte | T5 | rte-prompt-t5 | rte sentence1: {{sentence1}} sentence2: {{sentence2}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
mrqa | T5 | mrqa-prompt-t5 | question: {{question}} context: {{context}} | {{answers[0]}} | {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]} | en | squad |
cxc | T5 | cxc-prompt-t5 | sentence1: {{sentence1}} sentence2: {{sentence2}} | {{lambda score: np.round((score * 5) / 5, decimals=1)}} | {"languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"], "preprocessing": null} | en | pearson_corrcoef, spearman_corrcoef |
doc_nli | T5 | doc_nli-prompt-t5 | premise: {{premise}} hypothesis: {{hypothesis}} | {{label}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
snli | T5 | snli-prompt-t5 | premise: {{premise}} hypothesis: {{hypothesis}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
newsqa | T5 | newsqa-prompt-t5 | question: {{question}} context: {{context}} | {{answer[0]}} | {"languages": ["en"], "metrics": ["rouge"], "preprocessing": ["pad_punctuation"]} | en | rouge |
newsqa | T5 | newsqa-prompt-t5-without-context | question: {{question}} | {{answer[0]}} | {"languages": ["en"], "metrics": ["rouge"], "preprocessing": ["pad_punctuation"]} | en | rouge |
xfact | mT0 | xfact-factuality-prompt-mt0 | factuality claim: {{claim}} | {{lambda label: choices[{"false": 0, "partly true/misleading": 0, "mostly false": 0, "true": 1, "mostly true": 1, "half true": 1, "complicated/hard to categorise": 2, "other": 2}[label]]}} | {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | en | accuracy, macro_f1 |
xfact | mT0 | xfact-factuality-evidence-prompt-mt0 | factuality claim: {{claim}} evidence1: {{evidence_1}} evidence2: {{evidence_2}} evidence3: {{evidence_3}} evidence4: {{evidence_4}} evidence5: {{evidence_5}} | {{lambda label: choices[{"false": 0, "partly true/misleading": 0, "mostly false": 0, "true": 1, "mostly true": 1, "half true": 1, "complicated/hard to categorise": 2, "other": 2}[label]]}} | {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters", "pad_punctuation"]} | en | accuracy, macro_f1 |
common_gen | T5 | common_gen-prompt-t5 | generate: {{lambda concepts: " ".join(concepts)}} | {{target}} | {"languages": ["en"], "metrics": ["rouge"], "preprocessing": null} | en | rouge |
cssquad | mT0 | cssquad-prefix-prompt-mt0 | question: {{question}} context: {{context}} | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]} | en, ar, de, es, hi, vi, zh | squad |
cssquad | mT0 | cssquad-instruct1-prompt-mt0 | Answer the question depending on the context. Context: {{context}}; Question: {{question}}; Answer: | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": null} | en, ar, de, es, hi, vi, zh | squad |
cssquad | mT0 | cssquad-instruct2-prompt-mt0 | What is the answer? Context: {{context}}; Question: {{question}}; Answer: | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": null} | en, ar, de, es, hi, vi, zh | squad |
cssquad | mT0 | cssquad-instruct3-prompt-mt0 | Given the following passage "{{context}}", answer the following question. Note that the answer is present within the text. Question: {{question}} | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": null} | en, ar, de, es, hi, vi, zh | squad |
cssquad | mT0 | cssquad-instruct4-prompt-mt0 | Refer to the passage below and answer the following question: Passage: {{context}} Question: {{question}} | {{lambda answers: answers["text"][0]}} | {"languages": ["en", "ar", "de", "es", "hi", "vi", "zh"], "metrics": ["squad"], "preprocessing": null} | en, ar, de, es, hi, vi, zh | squad |
qqp | T5 | qqp-prompt-t5 | qqp question1: {{question1}} question2: {{question2}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy", "f1_invalid"], "preprocessing": null} | en | accuracy, f1_invalid |
clef2023 | mT0 | clef2023-checkworthiness-prompt-mt0 | checkworthiness claim: {{Text}} | {{lambda class_label: choices[0 if class_label.lower() == "no" else 1]}} | {"languages": ["ar", "en", "es"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | ar, en, es | accuracy, macro_f1 |
hover | mT0 | hover-factuality-prompt-mt0 | factuality claim: {{claim}} | {{lambda label: choices[label]}} | {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | en | accuracy, macro_f1 |
fakecovid | mT0 | fakecovid-factuality-prompt-mt0 | factuality claim: {{source_title}} | {{lambda class: choices[class]}} | {"languages": ["es", "en", "fr", "pt", "hi", "de", "it", "zh", "ar", "nl", "ko", "pl", "ru", "ja", "sk"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | es, en, fr, pt, hi, de, it, zh, ar, nl, ko, pl, ru, ja, sk | accuracy, macro_f1 |
ctkfacts | mT0 | ctkfacts-factuality-prompt-mt0 | factuality claim: {{claim}} evidence: {{evidence}} | {{lambda label: choices[label]}} | {"languages": ["cs"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | cs | accuracy, macro_f1 |
clef2022 | mT0 | clef2022-checkworthiness-prompt-mt0 | checkworthiness claim: {{tweet_text}} | {{lambda class_label: choices[class_label]}} | {"languages": ["ar", "bg", "nl", "en", "es", "tr"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | ar, bg, nl, en, es, tr | accuracy, macro_f1 |
nq_open | T5 | nq_open-prompt-t5 | nq question: {{question}} | {{answer[0]}} | {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]} | en | squad |
xsum | T5 | xsum-prompt-t5 | summarize: {{document}} | {{target}} | {"languages": ["en"], "metrics": ["rouge"], "preprocessing": null} | en | rouge |
csfever | mT0 | csfever-factuality-prompt-mt0 | factuality claim: {{claim}} evidence: {{evidence}} | {{lambda label: choices[label]}} | {"languages": ["cs"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | cs | accuracy, macro_f1 |
cnn_dailymail | T5 | cnn_dailymail-prompt-t5 | summarize: {{article}} | {{highlights}} | {"languages": ["en"], "metrics": ["rouge"], "preprocessing": null} | en | rouge |
qnli | T5 | qnli-prompt-t5 | qnli question: {{question}} sentence: {{sentence}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
wic | T5 | wic-prompt-t5 | wic sentence1: {{sentence1}} sentence2: {{sentence2}} word: {{word}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
race | T5 | race-prompt-t5 | question: {{question}} context: {{article}} choice0: {{options[0]}} choice1: {{options[1]}} choice2: {{options[2]}} choice3: {{options[3]}} | {{lambda answer: str(ord(answer) - ord("A"))}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": null} | en | accuracy |
sst2 | T5 | sst2-prompt-t5 | sst2 sentence: {{sentence}} | {{lambda label: "<unk>" if label == -1 else choices[label]}} | {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": ["pad_punctuation"]} | en | accuracy |
fever | mT0 | fever-factuality-prompt-mt0 | factuality claim: {{claim}} | {{lambda label: choices[{"SUPPORTS": 0, "REFUTES": 1, "NOT ENOUGH INFO": 2}[label]]}} | {"languages": ["en"], "metrics": ["accuracy", "macro_f1"], "preprocessing": ["remove_urls", "replace_whitecharacters"]} | en | accuracy, macro_f1 |
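
The `input` and `target` columns above are templates: plain `{{field}}` placeholders are filled from the source example, while `{{lambda ...}}` entries post-process a raw field, typically mapping an integer or string label onto a list of answer choices. The sketch below shows how one such row might be rendered into a text-to-text pair; the `render_input` helper, the example record, and the `choices` strings are illustrative assumptions, not part of this dataset.

```python
import re

def render_input(template: str, example: dict) -> str:
    """Fill simple {{field}} placeholders from the example record.

    Only bare field names are handled here; templates that embed Python
    expressions (e.g. {{", ".join(entities)}}) would need real evaluation.
    """
    return re.sub(r"\{\{(\w+)\}\}", lambda m: str(example[m.group(1)]), template)

# Hypothetical record and verbalizer for the liar-binary-factuality-prompt-mt0 row;
# the actual choice strings are defined elsewhere, not shown in this table.
choices = ["choice_0", "choice_1"]
example = {"statement": "The statement under review.", "label": 3}

model_input = render_input("factuality claim: {{statement}}", example)
# Target lambda from that row: choices[0 if label in [0, 1, 5] else 1]
model_target = choices[0 if example["label"] in [0, 1, 5] else 1]

print(model_input)   # factuality claim: The statement under review.
print(model_target)  # choice_1, because label 3 is not in {0, 1, 5}
```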
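
Several rows also name preprocessing steps (`remove_urls`, `replace_whitecharacters`, `pad_punctuation`, `remove_markup`, and dataset-specific ones such as `record_preprocess` or `wsc_preprocess`) without showing their code. The regex-based sketch below illustrates what the three most common steps typically do; it is an assumption for illustration, not the exact functions used to build the dataset.

```python
import re
import string

def remove_urls(text: str) -> str:
    # Drop http(s) links, as commonly done for tweet/claim text.
    return re.sub(r"https?://\S+", "", text)

def replace_whitecharacters(text: str) -> str:
    # Collapse newlines, tabs and repeated spaces into single spaces.
    return re.sub(r"\s+", " ", text).strip()

def pad_punctuation(text: str) -> str:
    # Put spaces around punctuation so it is split off during tokenization.
    padded = re.sub(rf"([{re.escape(string.punctuation)}])", r" \1 ", text)
    return re.sub(r"\s+", " ", padded).strip()

raw = "Check https://example.com:\tit's  true!"
print(pad_punctuation(replace_whitecharacters(remove_urls(raw))))
```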