MrPotato committed
Commit e794ab8 · Parent: 20db327

changed to input ids

Files changed (1): ref_seg_ger.py (+29 -24)
ref_seg_ger.py CHANGED
@@ -18,7 +18,7 @@ from glob import glob
 import os
 import numpy as np
 from PIL import Image
-from tokenizers.pre_tokenizers import Whitespace
+from transformers import AutoTokenizer
 import datasets
 from itertools import chain
 import pandas as pd
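Review note: the two tokenizers return differently shaped results, which is what drives the indexing changes further down. A minimal sketch of the contrast (illustrative input, not from the dataset; exact ids depend on the xlm-roberta-base vocabulary):

    from tokenizers.pre_tokenizers import Whitespace
    from transformers import AutoTokenizer

    # The old pre-tokenizer yields (substring, (start, end)) tuples,
    # hence the tokens[0][0] indexing to recover the text.
    Whitespace().pre_tokenize_str("Meyer 1987")
    # -> [('Meyer', (0, 5)), ('1987', (6, 10))]

    # The new tokenizer yields integer vocabulary indices,
    # so the ids can be appended directly.
    tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
    tok("Meyer 1987", add_special_tokens=False)["input_ids"]
    # -> a list of ints (values depend on the vocabulary)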
@@ -62,7 +62,7 @@ _LABELS = [
 _FEATURES = datasets.Features(
     {
         #"id": datasets.Value("string"),
-        "input_ids": datasets.Sequence(datasets.Value("string")),
+        "input_ids": datasets.Sequence(datasets.Value("int64")),
         #"bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
         # "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
         # "fonts": datasets.Sequence(datasets.Value("string")),
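The dtype change follows from the tokenizer swap: the column now stores vocabulary indices, not token strings. A quick check that the new feature type accepts integer lists (the ids below are made up):

    import datasets

    features = datasets.Features(
        {"input_ids": datasets.Sequence(datasets.Value("int64"))}
    )
    features.encode_example({"input_ids": [581, 23182]})  # a list of ints validates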
@@ -136,7 +136,7 @@ class RefSeg(datasets.GeneratorBasedBuilder):
     # ]

     # DEFAULT_CONFIG_NAME = "small"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-    TOKENIZER = Whitespace()
+    TOKENIZER = AutoTokenizer.from_pretrained("xlm-roberta-base")

     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
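Since TOKENIZER is a class attribute, xlm-roberta-base is downloaded and loaded once when the class body executes and is shared by all instances. Worth noting for the logic below: a subword tokenizer can split one whitespace-delimited token into several pieces, e.g. (hypothetical input):

    ids = RefSeg.TOKENIZER("Grundriß", add_special_tokens=False)["input_ids"]
    # len(ids) is often > 1 for rare words, so a single CSV row
    # may now contribute several entries to input_ids and labels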
@@ -214,25 +214,30 @@ class RefSeg(datasets.GeneratorBasedBuilder):
         labels = []
         for i, row in df.iterrows():

-            tokens = self.TOKENIZER.pre_tokenize_str(row['token'])
+            tokenized_input = self.TOKENIZER(
+                row['token'],
+                add_special_tokens=False,
+                return_offsets_mapping=False,
+                return_attention_mask=False,
+            )
             #print(tokenized_input)
-            if len(tokens) > 1:
+            if len(tokenized_input['input_ids']) > 1:
                 if row['tag'] == 'B':
-                    input_ids.append(tokens[0][0])
+                    input_ids.append(tokenized_input['input_ids'][0])
                     labels.append(row['tag'] + '-' + row['label'])
-                    for input_id in tokens[1:]:
-                        input_ids.append(input_id[0])
+                    for input_id in tokenized_input['input_ids'][1:]:
+                        input_ids.append(input_id)
                         labels.append('I-' + row['label'])
                 elif row['tag'] == 'I':
-                    for input_id in tokens:
-                        input_ids.append(input_id[0])
+                    for input_id in tokenized_input['input_ids']:
+                        input_ids.append(input_id)
                         labels.append('I-' + row['label'])
                 else:
-                    for input_id in tokens:
-                        input_ids.append(input_id[0])
+                    for input_id in tokenized_input['input_ids']:
+                        input_ids.append(input_id)
                         labels.append('O')
-            elif len(tokens) == 1:
-                input_ids.append(tokens[0][0])
+            elif len(tokenized_input['input_ids']) == 1:
+                input_ids.append(tokenized_input['input_ids'][0])
                 if row['tag'] == 'O':
                     labels.append(row['tag'])
                 else:
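For readability, the B/I/O propagation in this hunk can be read as a standalone helper. A sketch under the same assumptions (one row carries token, tag, and label; the first subword keeps the B tag, continuations become I; the single-subword branch is folded in):

    def expand_row(ids, tag, label):
        # B: first subword keeps 'B-<label>', remaining subwords continue as 'I-<label>'
        if tag == 'B':
            return [(ids[0], 'B-' + label)] + [(i, 'I-' + label) for i in ids[1:]]
        # I: every subword continues the entity
        if tag == 'I':
            return [(i, 'I-' + label) for i in ids]
        # O: outside any entity
        return [(i, 'O') for i in ids]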
@@ -247,16 +252,16 @@ class RefSeg(datasets.GeneratorBasedBuilder):
             # split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
             # split_fonts = fonts[index:index + self.CHUNK_SIZE]
             split_labels = labels[index:max(len(input_ids), index + self.CHUNK_SIZE)]
-            #split_labels_post = [item for sublist in split_labels for item in sublist]
-            # if(len(split_ids) != len(split_labels)):
-            #     print(f)
-            #     print(len(input_ids), input_ids)
-            #     print(len(split_labels), split_labels)
-            # for s in split_labels:
-            #     if type(s) is not str:
-            #         print(f)
-            #         print(len(input_ids), input_ids)
-            #         print(len(split_labels), split_labels)
+            split_labels_post = [item for sublist in split_labels for item in sublist]
+            if(len(split_ids) != len(split_labels)):
+                print(f)
+                print(len(input_ids), input_ids)
+                print(len(split_labels), split_labels)
+            for s in split_labels:
+                if type(s) is not str:
+                    print(f)
+                    print(len(input_ids), input_ids)
+                    print(len(split_labels), split_labels)
             #print(len(split_labels_post), split_labels_post)
             #print(split_labels, len(split_labels))
             #print(split_ids, len(split_ids))
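Review note on the re-enabled debug block: split_ids and f come from enclosing scope not shown in this hunk (the current chunk of input ids and the file being processed, by the look of it), and flattening a list of label strings iterates their characters, so split_labels_post may not be what is intended. The check itself is essentially a chunk-alignment assertion; a minimal equivalent sketch under that assumption:

    assert len(split_ids) == len(split_labels), (
        f"chunk mismatch: {len(split_ids)} ids vs {len(split_labels)} labels"
    )
    assert all(isinstance(s, str) for s in split_labels)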
 