changed to input ids
ref_seg_ger.py  CHANGED  (+12 -2)
@@ -72,6 +72,9 @@ _FEATURES = datasets.Features(
         #"original_image": datasets.features.Image(),
         "labels": datasets.Sequence(datasets.features.ClassLabel(
             names=list(chain.from_iterable([['B-' + x, 'I-' + x] for x in _LABELS])) + ['O']
+        )),
+        "labels_ref": datasets.Sequence(datasets.features.ClassLabel(
+            names=['B-ref', 'I-ref', 'O-ref']
         ))
         # These are the features of your dataset like images, labels ...
     }
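Side note on the new `labels_ref` feature: like the existing `labels` feature, a `Sequence(ClassLabel(...))` stores the tag strings as integer ids. A minimal standalone sketch of that behaviour (not part of the loader; variable names here are only illustrative):

from datasets import ClassLabel, Sequence

# Same label names as in the diff above.
ref_label = ClassLabel(names=['B-ref', 'I-ref', 'O-ref'])
labels_ref_feature = Sequence(ref_label)

# ClassLabel maps tag strings to integer ids and back.
print(ref_label.str2int('B-ref'))  # 0
print(ref_label.str2int('O-ref'))  # 2
print(ref_label.int2str(1))        # 'I-ref'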
@@ -212,6 +215,7 @@ class RefSeg(datasets.GeneratorBasedBuilder):
             df = pd.read_csv(f, keep_default_na=False)
             input_ids = []
             labels = []
+            refs = []
             for i, row in df.iterrows():

                 #tokenized_input = row['token'].split(' ')
@@ -220,6 +224,8 @@ class RefSeg(datasets.GeneratorBasedBuilder):
                     continue
                 tokenized_input, offsets = zip(*tkn)
                 tokenized_input = list(tokenized_input)
+                for t in range(len(tokenized_input)):
+                    refs.append(row['ref'] + '-ref')
                 if len(tokenized_input) > 1:
                     if row['tag'] == 'B':
                         if tokenized_input[0] == '':
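The two added lines repeat the row-level `ref` tag once per sub-token, so `refs` stays aligned index-for-index with `input_ids`. A toy illustration of that alignment (assuming, as the ClassLabel names above suggest, that `row['ref']` holds 'B', 'I' or 'O'; this is not confirmed by the diff itself):

# Toy example, not the loader itself.
tokenized_input = ['Mu', '##ller', ',', 'K', '.']  # sub-tokens of one CSV row
row_ref = 'B'                                      # assumed value of row['ref']

refs = []
for t in range(len(tokenized_input)):
    refs.append(row_ref + '-ref')

print(refs)  # ['B-ref', 'B-ref', 'B-ref', 'B-ref', 'B-ref']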
@@ -254,15 +260,18 @@ class RefSeg(datasets.GeneratorBasedBuilder):

             clean_input_ids = []
             clean_labels = []
+            clean_refs = []
             for i, input in enumerate(input_ids):
                 if input != '':
                     clean_input_ids.append(input)
                     clean_labels.append(labels[i])
+                    clean_refs.append(refs[i])
             n_chunks = int(len(clean_input_ids)/self.CHUNK_SIZE) if len(clean_input_ids)%self.CHUNK_SIZE == 0 \
                 else int(len(clean_input_ids)/self.CHUNK_SIZE) + 1
             split_ids = np.array_split(clean_input_ids, n_chunks)
             split_labels = np.array_split(clean_labels, n_chunks)
-
+            split_refs = np.array_split(clean_refs, n_chunks)
+            for chunk_ids, chunk_labels, chunk_refs in zip(split_ids, split_labels, split_refs):

                 #for chunk_id, index in enumerate(range(0, len(clean_input_ids), self.CHUNK_SIZE)):
                 #split_ids = clean_input_ids[index:max(len(clean_input_ids), index + self.CHUNK_SIZE)]
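The `n_chunks` expression is a ceiling division, and `np.array_split` then cuts the cleaned sequences into that many near-equal parts, so no chunk exceeds `CHUNK_SIZE`. A quick sketch of the same arithmetic (with an illustrative chunk size, not the value the loader actually uses):

import math
import numpy as np

CHUNK_SIZE = 4                     # illustrative only
clean_input_ids = list(range(10))  # 10 cleaned tokens

# Equivalent to the if/else expression in the diff.
n_chunks = math.ceil(len(clean_input_ids) / CHUNK_SIZE)   # 3

split_ids = np.array_split(clean_input_ids, n_chunks)
print([len(chunk) for chunk in split_ids])  # [4, 3, 3] - none larger than CHUNK_SIZE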
@@ -294,6 +303,7 @@ class RefSeg(datasets.GeneratorBasedBuilder):
                     # "fonts": split_fonts,
                     #"image": image,
                     #"original_image": original_image,
-                    "labels": chunk_labels
+                    "labels": chunk_labels,
+                    "labels_ref": chunk_refs
                 }
                 key += 1