Theoreticallyhugo committed on
Commit
a8cb6d5
·
1 Parent(s): 1421c3e

add warning about tokenisation

Browse files
Files changed (1) hide show
  1. Stab-Gurevych-Essays.py +3 -0
Stab-Gurevych-Essays.py CHANGED
@@ -387,6 +387,9 @@ class NewDataset(datasets.GeneratorBasedBuilder):
387
  tokens = []
388
  labels = []
389
  # tokenise spans
 
 
 
390
  for span in spans:
391
  span_tokens = span[1].split()
392
  label = span[0]
 
387
  tokens = []
388
  labels = []
389
  # tokenise spans
390
+ # WARN: the old dataset considered punctuation to be separate tokens, whilst this one doesn't.
391
+ # this shouldn't matter, however, since this is just pre-tokenisation; the text will be pre-tokenised for the respective model later on.
392
+ # I assume that the later tokenisation will create equal results.
393
  for span in spans:
394
  span_tokens = span[1].split()
395
  label = span[0]