import json
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
from types import SimpleNamespace

import datasets

from licenses import License, Licenses


# Sentinel namespace for special values; NULL marks string fields with no value.
BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")


@dataclass
class BigBioConfig(datasets.BuilderConfig):
    """BuilderConfig for BigBio."""

    name: str = None
    version: datasets.Version = None
    description: str = None
    schema: str = None
    subset_id: str = None
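
# Example configuration (all values below are hypothetical, for illustration only):
#   BigBioConfig(
#       name="example_dataset_bigbio_te",
#       version=datasets.Version("1.0.0"),
#       description="Example dataset in the BigBio entailment schema",
#       schema="bigbio_te",
#       subset_id="example_dataset",
#   )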




# shamelessly copied from:
# https://github.com/huggingface/datasets/blob/master/src/datasets/utils/metadata.py
with open("languages.json", "r", encoding="utf-8") as langs_file:
    langs_json = json.load(langs_file)
langs_dict = {k.replace("-", "_").upper(): v for k, v in langs_json.items()}
Lang = Enum("Lang", langs_dict)
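
# Usage sketch (assumes languages.json maps language codes to names, e.g. {"en": "English"}):
#   Lang.EN.value  # -> "English"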


# Names and expected types of the metadata attributes a dataset module provides.
METADATA: dict = {
    "_LOCAL": bool,
    "_LANGUAGES": Lang,
    "_PUBMED": bool,
    "_LICENSE": License,
    "_DISPLAYNAME": str,
}
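
# A dataloader would typically declare these as module-level constants, e.g.
# (hypothetical values):
#   _LOCAL = False
#   _LANGUAGES = Lang.EN
#   _PUBMED = True
#   _LICENSE = <a member of the Licenses namespace>
#   _DISPLAYNAME = "Example Dataset"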


# Task categories covered by BigBio datasets, with their short identifiers.
class Tasks(Enum):
    NAMED_ENTITY_RECOGNITION = "NER"
    NAMED_ENTITY_DISAMBIGUATION = "NED"
    EVENT_EXTRACTION = "EE"
    RELATION_EXTRACTION = "RE"
    COREFERENCE_RESOLUTION = "COREF"
    QUESTION_ANSWERING = "QA"
    TEXTUAL_ENTAILMENT = "TE"
    SEMANTIC_SIMILARITY = "STS"
    PARAPHRASING = "PARA"
    TRANSLATION = "TRANSL"
    SUMMARIZATION = "SUM"
    TEXT_CLASSIFICATION = "TXTCLASS"
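
# Usage sketch (hypothetical; a dataloader might advertise its tasks like this):
#   _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]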


# Feature schema for textual entailment examples (premise/hypothesis pairs with a label).
entailment_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "premise": datasets.Value("string"),
        "hypothesis": datasets.Value("string"),
        "label": datasets.Value("string"),
    }
)
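
# Usage sketch (arguments other than `features` are illustrative):
#   info = datasets.DatasetInfo(
#       description="Example entailment dataset",
#       features=entailment_features,
#   )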