Dr. Jorge Abreu Vicente committed
Commit: 510e511 • Parent(s): c157e89

update BLURB.py - biosses added
BLURB.py CHANGED
@@ -17,7 +17,7 @@ logger = datasets.logging.get_logger(__name__)
 class BlurbConfig(datasets.BuilderConfig):
     """BuilderConfig for BLURB."""
 
-    def __init__(self, task, data_url, citation, homepage, label_classes=
+    def __init__(self, task, data_url, citation, homepage, label_classes=None, **kwargs):
         """BuilderConfig for BLURB.
         Args:
           task: `string` task the dataset is used for: 'ner', 'pico', 'rel-ext', 'sent-sim', 'doc-clas', 'qa'
@@ -52,6 +52,16 @@ class BlurbConfig(datasets.BuilderConfig):
                 "validation": f"{self.base_url}{'devel.tsv'}",
                 "test": f"{self.base_url}{'test.tsv'}"
                 }
+        if self.task == 'sent-sim':
+            self.features = datasets.Features(
+                {
+                    "sentence1": datasets.Value("string"),
+                    "sentence2": datasets.Value("string"),
+                    "score": datasets.Value("float32"),
+                }
+            )
+
+
 
 
 class Blurb(datasets.GeneratorBasedBuilder):
@@ -90,6 +100,11 @@ class Blurb(datasets.GeneratorBasedBuilder):
                     description=DESCRIPTIONS['JNLPBA'],
                     citation=CITATIONS['JNLPBA'],
                     homepage=HOMEPAGES['JNLPBA']),
+        BlurbConfig(name='BIOSSES', task='sent-sim', label_classes=None,
+                    data_url = DATA_URL['BIOSSES'],
+                    description=DESCRIPTIONS['BIOSSES'],
+                    citation=CITATIONS['BIOSSES'],
+                    homepage=HOMEPAGES['BIOSSES']),
 
         ]
 
@@ -104,16 +119,13 @@ class Blurb(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        print(self.config.base_url)
-        print(self.config.data_url)
-        for i in self.config.urls:
-            print(self.config.urls[i])
-
+        """Returns SplitGenerators."""
         if self.config.task == 'ner':
             downloaded_files = dl_manager.download_and_extract(self.config.urls)
-
-
+            return self._ner_split_generator(downloaded_files)
+        if self.config.task == 'sent-sim':
+            downloaded_file = dl_manager.download_and_extract(self.config.data_url)
+            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_file})]
 
 
     def _generate_examples(self, filepath):
@@ -122,6 +134,8 @@ class Blurb(datasets.GeneratorBasedBuilder):
 
         if self.config.task == 'ner':
            return self._ner_example_generator(filepath)
+        if self.config.task == 'sent-sim':
+            return self._sentsim_example_generator(filepath)
 
     def _ner_split_generator(self, downloaded_files):
         return [
@@ -160,3 +174,15 @@ class Blurb(datasets.GeneratorBasedBuilder):
                 "tokens": tokens,
                 "ner_tags": ner_tags,
                 }
+
+
+    def _sentsim_example_generator(self, filepath):
+        """Yields examples as (key, example) tuples."""
+
+        df = pd.read_csv(filepath, sep="\t", encoding="utf-8")
+        for idx, row in df.iterrows():
+            yield idx, {
+                "sentence1": row["sentence1"],
+                "sentence2": row["sentence2"],
+                "score": row["score"],
+            }
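A minimal usage sketch of the BIOSSES config added by this commit, assuming the updated BLURB.py is available as a local script (the Hub repo id is not shown in this commit, so the local path below is a placeholder):

import datasets

# Load the new BIOSSES sentence-similarity config.
# Newer versions of the datasets library may additionally require
# trust_remote_code=True when loading script-based datasets.
biosses = datasets.load_dataset("./BLURB.py", name="BIOSSES")

# The sent-sim branch of _split_generators exposes a single TRAIN split.
print(biosses["train"].features)   # sentence1: string, sentence2: string, score: float32
print(biosses["train"][0])         # one sentence pair with its similarity score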
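Since _sentsim_example_generator reads the downloaded file with pd.read_csv, the script presumably imports pandas at module level (that import is not part of this diff). The sketch below is a hypothetical sanity check of the TSV schema the generator expects; "biosses.tsv" stands in for a local copy of whatever DATA_URL['BIOSSES'] points to.

import pandas as pd

# "biosses.tsv" is a hypothetical local copy of the file referenced by DATA_URL['BIOSSES'].
df = pd.read_csv("biosses.tsv", sep="\t", encoding="utf-8")

# These are the columns _sentsim_example_generator accesses on each row.
expected = {"sentence1", "sentence2", "score"}
missing = expected - set(df.columns)
if missing:
    raise ValueError(f"BIOSSES TSV is missing expected columns: {missing}")

print(df.head())           # first few sentence pairs and similarity scores
print(len(df), "pairs")    # total number of sentence pairs in the file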