MrPotato committed
Commit e9d0e5a · 1 Parent(s): 3402f2e

changed to input ids

Files changed (1): ref_seg_ger.py (+27 -25)
ref_seg_ger.py CHANGED
@@ -62,14 +62,14 @@ _LABELS = [
 
 _FEATURES = datasets.Features(
     {
-        #"id": datasets.Value("string"),
+        # "id": datasets.Value("string"),
         "input_ids": datasets.Sequence(datasets.Value("string")),
         "attention_mask": datasets.Sequence(datasets.Value("int64")),
-        #"bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
+        # "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
         # "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
         # "fonts": datasets.Sequence(datasets.Value("string")),
-        #"image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
-        #"original_image": datasets.features.Image(),
+        # "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
+        # "original_image": datasets.features.Image(),
         "labels": datasets.Sequence(datasets.features.ClassLabel(
             names=list(chain.from_iterable([['B-' + x, 'I-' + x] for x in _LABELS])) + ['O']
         )),
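As context for the schema hunk above: the names argument expands every entry of _LABELS into a B-/I- pair and appends the outside tag 'O'. A minimal sketch with a hypothetical _LABELS value (the real list is defined earlier in ref_seg_ger.py and is not shown in this diff):

from itertools import chain

_LABELS = ['author', 'title']  # hypothetical stand-in for the script's label list

names = list(chain.from_iterable([['B-' + x, 'I-' + x] for x in _LABELS])) + ['O']
print(names)  # ['B-author', 'I-author', 'B-title', 'I-title', 'O']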
@@ -80,6 +80,7 @@ _FEATURES = datasets.Features(
     }
 )
 
+
 def load_image(image_path, size=None):
     image = Image.open(image_path).convert("RGB")
     w, h = image.size
@@ -170,7 +171,7 @@ class RefSeg(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         data_dir = dl_manager.download_and_extract(_URLS)
-        #print(data_dir)
+        # print(data_dir)
         # with open(os.path.join(data_dir, "train.csv")) as f:
         #     files_train = [{'id': row['id'], 'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
         #                     'filepath_img': os.path.join(data_dir, row['filepath_img'])} for row in
@@ -207,8 +208,8 @@ class RefSeg(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, filepath, split):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        #print(filepath)
-        #print(split)
+        # print(filepath)
+        # print(split)
         paths = glob(filepath + '/' + split + '/*.csv')
         key = 0
         for f in paths:
@@ -218,14 +219,17 @@ class RefSeg(datasets.GeneratorBasedBuilder):
             refs = []
             for i, row in df.iterrows():
 
-                #tokenized_input = row['token'].split(' ')
+                # tokenized_input = row['token'].split(' ')
                 tkn = self.TOKENIZER.pre_tokenize_str(row['token'])
                 if not tkn:
                     continue
                 tokenized_input, offsets = zip(*tkn)
                 tokenized_input = list(tokenized_input)
                 for t in range(len(tokenized_input)):
-                    refs.append(row['ref'] + '-ref')
+                    if t == 0:
+                        refs.append(row['ref'] + '-ref')
+                    else:
+                        refs.append('I-ref')
                 if len(tokenized_input) > 1:
                     if row['tag'] == 'B':
                         if tokenized_input[0] == '':
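The substantive change in this hunk: previously every pre-tokenized piece of a token was tagged row['ref'] + '-ref'; now only the first piece keeps that tag and every continuation piece becomes 'I-ref', BIO-style. A minimal sketch of the new rule (the 'B' ref value and the token pieces are hypothetical; in the script the pieces come from self.TOKENIZER.pre_tokenize_str):

def ref_labels(ref, pieces):
    # First piece keeps the row's ref tag (e.g. 'B-ref'); the rest are continuations.
    return [ref + '-ref' if t == 0 else 'I-ref' for t in range(len(pieces))]

print(ref_labels('B', ['Meier', ',', '1998']))  # ['B-ref', 'I-ref', 'I-ref']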
@@ -266,20 +270,19 @@ class RefSeg(datasets.GeneratorBasedBuilder):
                     clean_input_ids.append(input)
                     clean_labels.append(labels[i])
                     clean_refs.append(refs[i])
-            n_chunks = int(len(clean_input_ids)/self.CHUNK_SIZE) if len(clean_input_ids)%self.CHUNK_SIZE == 0 \
-                else int(len(clean_input_ids)/self.CHUNK_SIZE) + 1
+            n_chunks = int(len(clean_input_ids) / self.CHUNK_SIZE) if len(clean_input_ids) % self.CHUNK_SIZE == 0 \
+                else int(len(clean_input_ids) / self.CHUNK_SIZE) + 1
             split_ids = np.array_split(clean_input_ids, n_chunks)
             split_labels = np.array_split(clean_labels, n_chunks)
             split_refs = np.array_split(clean_refs, n_chunks)
             for chunk_ids, chunk_labels, chunk_refs in zip(split_ids, split_labels, split_refs):
-
-                #for chunk_id, index in enumerate(range(0, len(clean_input_ids), self.CHUNK_SIZE)):
-                #split_ids = clean_input_ids[index:max(len(clean_input_ids), index + self.CHUNK_SIZE)]
-                #split_bboxes = bboxes[index:index + self.CHUNK_SIZE]
+                # for chunk_id, index in enumerate(range(0, len(clean_input_ids), self.CHUNK_SIZE)):
+                # split_ids = clean_input_ids[index:max(len(clean_input_ids), index + self.CHUNK_SIZE)]
+                # split_bboxes = bboxes[index:index + self.CHUNK_SIZE]
                 # split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
                 # split_fonts = fonts[index:index + self.CHUNK_SIZE]
-                #split_labels = clean_labels[index:max(len(clean_input_ids), index + self.CHUNK_SIZE)]
-                #split_labels_post = [item for sublist in split_labels for item in sublist]
+                # split_labels = clean_labels[index:max(len(clean_input_ids), index + self.CHUNK_SIZE)]
+                # split_labels_post = [item for sublist in split_labels for item in sublist]
                 # if(len(split_ids) != len(split_labels)):
                 #     print(f)
                 #     print(len(input_ids), input_ids)
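For the chunking above, n_chunks is a ceiling division of the cleaned token count by self.CHUNK_SIZE, and np.array_split then balances the remainder so no chunk exceeds the chunk size. A minimal sketch with hypothetical sizes:

import numpy as np

CHUNK_SIZE = 512                  # hypothetical; the real value is a class attribute
clean_input_ids = ['tok'] * 1100  # hypothetical cleaned token list

n_chunks = int(len(clean_input_ids) / CHUNK_SIZE) if len(clean_input_ids) % CHUNK_SIZE == 0 \
    else int(len(clean_input_ids) / CHUNK_SIZE) + 1
# Equivalent ceiling division: n_chunks = -(-len(clean_input_ids) // CHUNK_SIZE)

split_ids = np.array_split(clean_input_ids, n_chunks)
print(n_chunks, [len(c) for c in split_ids])  # 3 [367, 367, 366]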
@@ -289,20 +292,19 @@ class RefSeg(datasets.GeneratorBasedBuilder):
                 #     print(f)
                 #     print(len(input_ids), input_ids)
                 #     print(len(split_labels), split_labels)
-                #print(len(split_labels_post), split_labels_post)
-                #print(split_labels, len(split_labels))
-                #print(split_ids, len(split_ids))
-
+                # print(len(split_labels_post), split_labels_post)
+                # print(split_labels, len(split_labels))
+                # print(split_ids, len(split_ids))
 
                 yield key, {
-                    #"id": f"{os.path.basename(f)}_{chunk_id}",
+                    # "id": f"{os.path.basename(f)}_{chunk_id}",
                     'input_ids': chunk_ids,
                     'attention_mask': [1] * len(chunk_ids),
-                    #"bbox": split_bboxes,
+                    # "bbox": split_bboxes,
                     # "RGBs": split_rgbs,
                     # "fonts": split_fonts,
-                    #"image": image,
-                    #"original_image": original_image,
+                    # "image": image,
+                    # "original_image": original_image,
                     "labels": chunk_labels,
                     "labels_ref": chunk_refs
                 }
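Each yielded example keeps input_ids, labels, and labels_ref aligned one-to-one, with attention_mask set to all ones over the chunk. A hypothetical instance of the yielded structure (the label names are invented for illustration):

example = {
    'input_ids': ['Meier', ',', '1998'],
    'attention_mask': [1, 1, 1],                # one entry per token in the chunk
    'labels': ['B-author', 'I-author', 'O'],    # hypothetical class-label names
    'labels_ref': ['B-ref', 'I-ref', 'I-ref'],
}
assert len(example['attention_mask']) == len(example['input_ids'])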
 