MrPotato committed on
Commit
78bf5a8
·
1 Parent(s): dcc1ce9

changed chunk generation

Browse files
Files changed (1) hide show
  1. ref_seg_ger.py +1 -8
ref_seg_ger.py CHANGED
@@ -207,22 +207,15 @@ class RefSeg(datasets.GeneratorBasedBuilder):
207
  #print(filepath)
208
  #print(split)
209
  paths = glob(filepath + '/' + split + '/*.csv')
210
- print(paths)
211
  key = 0
212
  for f in paths:
213
- print(f)
214
  df = pd.read_csv(f, keep_default_na=False)
215
  input_ids = []
216
  labels = []
217
  for i, row in df.iterrows():
218
 
219
  #tokenized_input = row['token'].split(' ')
220
- print(row['token'])
221
- print(self.TOKENIZER.pre_tokenize(row['token']))
222
- tokenized_input, offsets = zip(*self.TOKENIZER.pre_tokenize(row['token']))
223
- print(tokenized_input)
224
- if f.endswith('Cermaine_0.xml.csv'):
225
- print(tokenized_input)
226
  if len(tokenized_input) > 1:
227
  if row['tag'] == 'B':
228
  if tokenized_input[0] == '':
 
207
  #print(filepath)
208
  #print(split)
209
  paths = glob(filepath + '/' + split + '/*.csv')
 
210
  key = 0
211
  for f in paths:
 
212
  df = pd.read_csv(f, keep_default_na=False)
213
  input_ids = []
214
  labels = []
215
  for i, row in df.iterrows():
216
 
217
  #tokenized_input = row['token'].split(' ')
218
+ tokenized_input, offsets = zip(*self.TOKENIZER.pre_tokenize_str(row['token']))
 
 
 
 
 
219
  if len(tokenized_input) > 1:
220
  if row['tag'] == 'B':
221
  if tokenized_input[0] == '':